code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit (Project Euler 72)."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so sieve the totients of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])

if __name__ == "__main__":
    print(solution())
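# Hand-checked examples (not in the original file): solution(8) == 21 and solution(10) == 31,
# matching the counts of reduced proper fractions n/d with d <= limit from Project Euler 72.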
| 685 |
def actual_power(a: int, b: int) -> int:
    """Divide-and-conquer exponentiation.

    Note: int(b / 2) truncates toward zero, so the recursion also terminates for a
    negative b, in which case the result equals a ** abs(b).
    """
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half

def power(a: int, b: int) -> float:
    """a raised to the power b, supporting negative exponents."""
    if b < 0:
        # actual_power(a, b) returns a ** abs(b) for negative b (see note above)
        return 1 / actual_power(a, b)
    return actual_power(a, b)

if __name__ == "__main__":
    print(power(-2, -3))
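# Quick check: power(-2, -3) == 1 / (-2) ** 3 == -0.125.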
| 685 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab and merges file, thus also resulting in a larger model due to the large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model ~50 times smaller than this, see `fsmt-make-super-tiny-model.py`,
# which is slightly more complicated.
#
# The result will then be used as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
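# Sanity check (a sketch added here, not part of the original script): reload the saved
# fp16 artifacts and run one forward pass to confirm nothing is corrupted.
#
#   reloaded_tok = FSMTTokenizer.from_pretrained(mname_tiny)
#   reloaded = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
#   print(reloaded(**reloaded_tok(["ok"], return_tensors="pt")).logits.shape)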
| 685 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ ,attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ : List[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,use_cache=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
SCREAMING_SNAKE_CASE_ : str = ids_tensor((self.batch_size, 3) ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append the new tokens to input_ids and to the attention mask
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Dict = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,past_key_values=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
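# The slice comparison above is the standard KV-cache consistency check: running the
# prefix once with use_cache=True and then feeding only the new tokens together with
# past_key_values must reproduce the hidden states of a single full-sequence pass.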
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : Optional[int] = type
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Dict = 3
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : str = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Any = 3
SCREAMING_SNAKE_CASE_ : int = 'single_label_classification'
SCREAMING_SNAKE_CASE_ : str = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Dict = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Tuple = 3
SCREAMING_SNAKE_CASE_ : str = 'multi_label_classification'
SCREAMING_SNAKE_CASE_ : int = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Tuple = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def snake_case ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([1, 10] ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE_ : int = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : List[Any] = original_model(snake_case__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : List[Any] = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE_ : int = LlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE_ : str = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : Optional[int] = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : List[str] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Dict = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : str = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : int = model(torch.tensor(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
@unittest.skip('Model is currently gated' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
SCREAMING_SNAKE_CASE_ : List[str] = 'Simply put, the theory of relativity states that '
SCREAMING_SNAKE_CASE_ : str = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(snake_case__ ,return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=snake_case__ )
# greedy generation outputs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.generate(snake_case__ ,max_new_tokens=64 ,top_p=snake_case__ ,temperature=1 ,do_sample=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(generated_ids[0] ,skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ ,snake_case__ )
| 685 | 1 |
import math

def is_prime(number: int) -> bool:
    """Check primality by trial division over candidates of the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    # (6k, 6k + 2, 6k + 4 are even and 6k + 3 is divisible by 3)
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

def solution(nth: int = 10_001) -> int:
    """Return the nth prime number (Project Euler 7)."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]

if __name__ == "__main__":
    print(f"{solution() = }")
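# Hand-checked examples (not in the original file): solution(1) == 2 and solution(6) == 13;
# the default call walks up to the 10_001st prime, the Project Euler 7 answer.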
| 685 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[b[0]] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
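# Illustrative note on the file format consumed above (the concrete strings are made up):
# each line of vocab.txt is one token id; a line like "こんにちは,コンニチハ" lists several
# comma-separated surface variants that all map to that same id, while raw_vocab keeps
# only the first variant per id.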
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>",
        bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token,
            eos_token=eos_token, do_clean_text=do_clean_text, **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
@property
def snake_case ( self ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def snake_case ( self ):
return dict(self.raw_vocab ,**self.added_tokens_encoder )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.tokenize(snake_case__ ,clean=self.do_clean_text )
def snake_case ( self ,snake_case__ ):
return self.vocab.get(snake_case__ ,self.vocab.get(self.unk_token ) )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.convert_id_to_token(snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = ''.join(snake_case__ ).strip()
return out_string
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case__ ,add_special_tokens=snake_case__ ) + [self.eos_token_id] )
if len(snake_case__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE_ : List[Any] = input_ids[-self.model_max_length :]
return input_ids
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
if os.path.isdir(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
SCREAMING_SNAKE_CASE_ : Tuple = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
SCREAMING_SNAKE_CASE_ : str = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
SCREAMING_SNAKE_CASE_ : Dict = token_index
writer.write(','.join(snake_case__ ) + '\n' )
index += 1
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
json.dump(self.emoji ,snake_case__ )
return vocab_file, emoji_file
class SubWordJapaneseTokenizer:
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)')
        self.content_repatter2 = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*')
        self.content_repatter3 = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}')
        self.content_repatter4 = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*'
        )
        self.content_repatter5 = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*'
        )
        self.content_repatter6 = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*'
        )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks})
def __len__( self ):
return len(self.ids_to_tokens )
    def clean_text(self, content):
        content = self.content_repatter1.sub('<URL>', content)
        content = self.content_repatter2.sub('<EMAIL>', content)
        content = self.content_repatter3.sub('<TEL>', content)
        content = self.content_repatter4.sub('<DATE>', content)
        content = self.content_repatter5.sub('<DATE>', content)
        content = self.content_repatter6.sub('<PRICE>', content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>', '<BLOCK>')
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(' ', '<SP>')
        text = text.replace('\u3000', '<SP>')  # full-width space
        text = text.replace('\r\n', '<BR>')
        text = text.replace('\n', '<BR>')
        text = text.replace('\r', '<BR>')
        text = text.replace('\t', '<TAB>')
        text = text.replace('—', 'ー')
        text = text.replace('−', 'ー')
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)
        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False
        def checkuae(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : List[Any] = []
while pos < len(snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = min(len(snake_case__ ) ,pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
SCREAMING_SNAKE_CASE_ : List[Any] = [] # (token_id, token, pos)
for e in range(snake_case__ ,snake_case__ ,-1 ):
SCREAMING_SNAKE_CASE_ : str = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(snake_case__ ) > 2:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(snake_case__ ) > 0:
# the smallest token_id is adopted
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = sorted(snake_case__ ,key=lambda snake_case__ : x[0] )[0]
result.append(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = e
else:
SCREAMING_SNAKE_CASE_ : Any = pos + 1
SCREAMING_SNAKE_CASE_ : Optional[int] = text[pos:end]
if check_simbol(snake_case__ ):
result.append('<KIGOU>' )
elif checkuae(snake_case__ ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
SCREAMING_SNAKE_CASE_ : int = end
return result
def snake_case ( self ,snake_case__ ,snake_case__="\n" ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : Dict = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(snake_case__ ) > 0:
words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : Dict = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(snake_case__ )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(snake_case__ )
if len(snake_case__ ) > 0:
words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : int = ''.join(snake_case__ )
return text
| 685 | 1 |
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray

def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given as (magnitude, angle) into its [x, y] components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]

def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check that the net moment of 2D forces applied at 2D locations is ~zero."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps

if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
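# A minimal worked example of the moment check (hand-computed, not in the original):
# a force [0, 2] applied at [1, 0] has cross([1, 0], [0, 2]) == 2, so on its own it is
# *not* in static equilibrium; pairing it with [0, -2] at the same point cancels the moment.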
| 685 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch `function(*args)` on TPU, multi-GPU, one GPU, MPS or CPU from a notebook."""
    # Are we running in a Colab or a Kaggle kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith('KAGGLE') for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = 'google.colab' in str(sys.modules['IPython'].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.')

    if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME', None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
                'your training function. Restart your notebook and make sure no cells initializes an '
                '`Accelerator`.')
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type='TPU')
        print(f'Launching a training on {num_processes} TPU cores.')
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method='fork')
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print('Launching training on one GPU.')
        else:
            print('Launching training on one CPU.')
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.')
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
                    'inside your training function. Restart your notebook and make sure no cells initializes an '
                    '`Accelerator`.')
            if torch.cuda.is_initialized():
                raise ValueError(
                    'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
                    'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
                    'function.')
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr='127.0.01', master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type='MULTI_GPU')
                print(f'Launching training on {num_processes} GPUs.')
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method='fork')
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
                            'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
                            'Please review your imports and test them when running the `notebook_launcher()` to identify '
                            'which one is problematic.') from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = '1'
                print('Launching training on MPS.')
            elif torch.cuda.is_available():
                print('Launching training on one GPU.')
            else:
                print('Launching training on CPU.')
            function(*args)

def debug_launcher(function, args=(), num_processes=2):
    """Run `function` through torch.multiprocessing on CPU, for debugging distributed code."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr='127.0.01',
            master_port='29500',
            accelerate_mixed_precision='no',
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu='yes',
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method='fork')
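# Usage sketch (assumed function and argument values, not from this file):
#
#   def training_loop(lr):
#       ...  # build the Accelerator and the training loop inside this function
#
#   notebook_launcher(training_loop, args=(1e-4,), num_processes=2)
#
# The key constraint, enforced by the checks above, is that neither CUDA nor an
# `Accelerator` may be touched in the notebook before the forked workers start.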
| 685 | 1 |
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings, e.g. it pulls "albert" out of a
# line like:     ("albert", "AlbertConfig"),
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')

def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, 'r', encoding='utf-8') as f:
        content = f.read()

    lines = content.split('\n')
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r'^(\s*)\S', lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(' ' * indent + '('):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(' ' * indent + ')'):
                        line_idx += 1
                    blocks.append('\n'.join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, 'w', encoding='utf-8') as f:
            f.write('\n'.join(new_lines))
    elif "\n".join(new_lines) != content:
        return True

def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith('.py')]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f'The following files have auto mappings that need sorting: {", ".join(failures)}. Run `make style` to fix'
            ' this.')

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
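# Usage sketch (assuming the script lives at utils/sort_auto_mappings.py as in the
# transformers repo):
#
#   python utils/sort_auto_mappings.py               # rewrite unsorted mappings in place
#   python utils/sort_auto_mappings.py --check_only  # only report, raising if unsorted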
| 685 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'''tokenization_tapex''': ['''TapexTokenizer''']}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 685 | 1 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 685 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
def snake_case ( self ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE_ : Any = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE_ : Dict = 'xa\u0303y' + ' ' + 'x\xe3y'
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Tuple = F'{text_of_1_token} {text_of_1_token}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : str = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F' {text}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : int = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(snake_case__ ) + 1, 1 + len(snake_case__ ) + 1 + len(snake_case__ )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
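# Toy-vocab walkthrough (hand-traced against the fixtures in setUp, not in the original
# file): with the merges "l o", "lo w</w>" and "e r</w>", the word "lower" tokenizes to
# ["lo", "w", "er</w>"], which is exactly what the full-tokenizer test asserts for the
# input "lower newer".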
| 685 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase__ : int = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True,
        crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True,
        image_mean=None, image_std=None, do_convert_rgb=True, **kwargs,
    ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = size if size is not None else {'shortest_edge': 224}
SCREAMING_SNAKE_CASE_ : Optional[Any] = get_size_dict(snake_case__ ,default_to_square=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE_ : Optional[Any] = get_size_dict(snake_case__ ,default_to_square=snake_case__ ,param_name='crop_size' )
SCREAMING_SNAKE_CASE_ : List[Any] = do_resize
SCREAMING_SNAKE_CASE_ : Tuple = size
SCREAMING_SNAKE_CASE_ : Optional[Any] = resample
SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE_ : Optional[int] = crop_size
SCREAMING_SNAKE_CASE_ : Dict = do_rescale
SCREAMING_SNAKE_CASE_ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_normalize
SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE_ : List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE_ : Any = do_convert_rgb
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ = PILImageResampling.BICUBIC ,snake_case__ = None ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(snake_case__ ,default_to_square=snake_case__ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
SCREAMING_SNAKE_CASE_ : List[Any] = get_resize_output_image_size(snake_case__ ,size=size['shortest_edge'] ,default_to_square=snake_case__ )
return resize(snake_case__ ,size=snake_case__ ,resample=snake_case__ ,data_format=snake_case__ ,**snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ = None ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : List[Any] = get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(snake_case__ ,size=(size['height'], size['width']) ,data_format=snake_case__ ,**snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ = None ,**snake_case__ ,):
return rescale(snake_case__ ,scale=snake_case__ ,data_format=snake_case__ ,**snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,**snake_case__ ,):
return normalize(snake_case__ ,mean=snake_case__ ,std=snake_case__ ,data_format=snake_case__ ,**snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = ChannelDimension.FIRST ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Tuple = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : Tuple = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : Any = get_size_dict(snake_case__ ,param_name='size' ,default_to_square=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ : Dict = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ : Any = get_size_dict(snake_case__ ,param_name='crop_size' ,default_to_square=snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : Any = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : List[str] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : List[str] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE_ : Tuple = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE_ : Optional[int] = [convert_to_rgb(snake_case__ ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : Any = [to_numpy_array(snake_case__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : int = [self.resize(image=snake_case__ ,size=snake_case__ ,resample=snake_case__ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ : List[str] = [self.center_crop(image=snake_case__ ,size=snake_case__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ : List[Any] = [self.rescale(image=snake_case__ ,scale=snake_case__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : int = [self.normalize(image=snake_case__ ,mean=snake_case__ ,std=snake_case__ ) for image in images]
SCREAMING_SNAKE_CASE_ : Any = [to_channel_dimension_format(snake_case__ ,snake_case__ ) for image in images]
SCREAMING_SNAKE_CASE_ : Tuple = {'pixel_values': images}
return BatchFeature(data=snake_case__ ,tensor_type=snake_case__ )
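# Editor's sketch (not part of the original file): the rescale -> normalize tail of the
# preprocessing chain above, reproduced standalone so the arithmetic is explicit. All
# names and values below are illustrative assumptions, not taken from the class.
import numpy as np
def _demo_rescale_normalize(image, mean, std, scale=1 / 255):
# rescale raw uint8 pixels to [0, 1], then shift/scale each channel
image = image.astype(np.float32) * scale
return (image - np.asarray(mean, dtype=np.float32)) / np.asarray(std, dtype=np.float32)
_demo = _demo_rescale_normalize(np.full((224, 224, 3), 128, dtype=np.uint8), mean=[0.5] * 3, std=[0.5] * 3)
assert abs(float(_demo[0, 0, 0]) - (128 / 255 - 0.5) / 0.5) < 1e-6 # a mid-gray pixel lands near 0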
| 685 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
SCREAMING_SNAKE_CASE_ : int = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE_ : Optional[Any] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 685 | 1 |
from math import pi
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] ) -> float:
"""simple docstring"""
return 2 * pi * radius * (angle / 3_60)
if __name__ == "__main__":
print(arc_length(90, 10))
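# Editor's worked check: 2 * pi * 10 * (90 / 360) = 5 * pi ~= 15.70796
assert abs(arc_length(90, 10) - 5 * pi) < 1e-9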
| 700 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : Dict = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'https://pypi.org/pypi/diffusers/json'
SCREAMING_SNAKE_CASE_ : Optional[int] = json.loads(request.urlopen(lowerCamelCase_ ).read() )['releases'].keys()
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : version.Version(lowerCamelCase_ ) )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(lowerCamelCase_ ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] ) -> Any:
"""simple docstring"""
init_hf_modules()
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = f.read()
# Imports of the form `import .xxx`
SCREAMING_SNAKE_CASE_ : Tuple = re.findall(r'^\s*import\s+\.(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase_ ) )
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [module_file]
SCREAMING_SNAKE_CASE_ : Tuple = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE_ : int = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ).parent
SCREAMING_SNAKE_CASE_ : int = [str(module_path / m ) for m in new_imports]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE_ : Any = [F'{f}.py' for f in new_import_files]
SCREAMING_SNAKE_CASE_ : Optional[int] = len(lowerCamelCase_ ) == 0
all_relative_imports.extend(lowerCamelCase_ )
return all_relative_imports
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] = f.read()
# Imports of the form `import xxx`
SCREAMING_SNAKE_CASE_ : List[str] = re.findall(r'^\s*import\s+(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(r'^\s*from\s+(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
SCREAMING_SNAKE_CASE_ : List[str] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(set(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : List[str] = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase_ )
except ImportError:
missing_packages.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
F'{", ".join(lowerCamelCase_ )}. Run `pip install {" ".join(lowerCamelCase_ )}`' )
return get_relative_imports(lowerCamelCase_ )
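# Editor's sketch of what the two scanners above return; the module text is hypothetical.
# Given a file containing:
#     from .utils import helper
#     import torch
# get_relative_imports(path) yields ["utils"], and check_imports(path) first verifies
# that `torch` is importable (raising ImportError otherwise) before returning that list.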
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = module_path.replace(os.path.sep , '.' )
SCREAMING_SNAKE_CASE_ : Any = importlib.import_module(lowerCamelCase_ )
if class_name is None:
return find_pipeline_class(lowerCamelCase_ )
return getattr(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
from ..pipelines import DiffusionPipeline
SCREAMING_SNAKE_CASE_ : List[Any] = dict(inspect.getmembers(lowerCamelCase_ , inspect.isclass ) )
SCREAMING_SNAKE_CASE_ : List[str] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCamelCase_ )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
SCREAMING_SNAKE_CASE_ : Any = cls
return pipeline_class
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = module_file_or_url
SCREAMING_SNAKE_CASE_ : Dict = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
SCREAMING_SNAKE_CASE_ : List[str] = get_diffusers_versions()
# cut ".dev0"
SCREAMING_SNAKE_CASE_ : Dict = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
SCREAMING_SNAKE_CASE_ : List[Any] = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
SCREAMING_SNAKE_CASE_ : int = F'v{revision}'
elif revision == "main":
SCREAMING_SNAKE_CASE_ : List[Any] = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
SCREAMING_SNAKE_CASE_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase_ , pipeline=lowerCamelCase_ )
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = cached_download(
lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Tuple = 'git'
SCREAMING_SNAKE_CASE_ : Dict = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE_ : List[str] = hf_hub_download(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
SCREAMING_SNAKE_CASE_ : Dict = check_imports(lowerCamelCase_ )
# Now we move the module inside our cached dynamic modules.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(lowerCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F'{module_needed}.py'
shutil.copy(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Tuple = use_auth_token
elif use_auth_token is True:
SCREAMING_SNAKE_CASE_ : int = HfFolder.get_token()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = model_info(lowerCamelCase_ , revision=lowerCamelCase_ , token=lowerCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
SCREAMING_SNAKE_CASE_ : Any = submodule_path / commit_hash
SCREAMING_SNAKE_CASE_ : List[Any] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase_ , F'{module_needed}.py' , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return os.path.join(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , **lowerCamelCase_ : Dict , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_cached_module_file(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return get_class_in_module(lowerCamelCase_ , final_module.replace('.py' , '' ) )
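# Editor's usage sketch (repo id hypothetical): assuming this module mirrors diffusers'
# dynamic-module loader, a community pipeline class could be fetched like so:
#     pipeline_cls = get_class_from_dynamic_module(
#         "some-user/some-community-pipeline",  # hypothetical pretrained id
#         module_file="pipeline.py",
#     )
# With class_name=None, find_pipeline_class picks the single DiffusionPipeline subclass
# defined in the downloaded module.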
| 685 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
UpperCamelCase__ : str = (7_20, 12_80) # Height, Width
UpperCamelCase__ : Optional[int] = (0.4, 0.6) # scale range for the random mosaic split point
UpperCamelCase__ : List[Any] = 1 / 1_00 # drop a bounding box if its height or width falls below this scale
UpperCamelCase__ : Tuple = ''''''
UpperCamelCase__ : Any = ''''''
UpperCamelCase__ : Dict = ''''''
UpperCamelCase__ : Dict = 2_50
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = get_dataset(__A , __A )
for index in range(__A ):
SCREAMING_SNAKE_CASE_ : List[str] = random.sample(range(len(__A ) ) , 4 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = update_image_and_anno(
__A , __A , __A , __A , __A , filter_scale=__A , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
SCREAMING_SNAKE_CASE_ : Optional[Any] = random_chars(32 )
SCREAMING_SNAKE_CASE_ : int = path.split(os.sep )[-1].rsplit('.' , 1 )[0]
SCREAMING_SNAKE_CASE_ : int = F'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
cva.imwrite(F'{file_root}.jpg' , __A , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' )
SCREAMING_SNAKE_CASE_ : str = []
for anno in new_annos:
SCREAMING_SNAKE_CASE_ : Optional[int] = anno[3] - anno[1]
SCREAMING_SNAKE_CASE_ : List[str] = anno[4] - anno[2]
SCREAMING_SNAKE_CASE_ : Tuple = anno[1] + width / 2
SCREAMING_SNAKE_CASE_ : Any = anno[2] + height / 2
SCREAMING_SNAKE_CASE_ : Optional[int] = F'{anno[0]} {x_center} {y_center} {width} {height}'
annos_list.append(__A )
with open(F'{file_root}.txt' , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = []
SCREAMING_SNAKE_CASE_ : int = []
for label_file in glob.glob(os.path.join(__A , '*.txt' ) ):
SCREAMING_SNAKE_CASE_ : List[str] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(__A ) as in_file:
SCREAMING_SNAKE_CASE_ : List[Any] = in_file.readlines()
SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(__A , F'{label_name}.jpg' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for obj_list in obj_lists:
SCREAMING_SNAKE_CASE_ : Any = obj_list.rstrip('\n' ).split(' ' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = float(obj[1] ) - float(obj[3] ) / 2
SCREAMING_SNAKE_CASE_ : Dict = float(obj[2] ) - float(obj[4] ) / 2
SCREAMING_SNAKE_CASE_ : Optional[Any] = float(obj[1] ) + float(obj[3] ) / 2
SCREAMING_SNAKE_CASE_ : Union[str, Any] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__A )
labels.append(__A )
return img_paths, labels
def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : int = 0.0 , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
SCREAMING_SNAKE_CASE_ : List[str] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
SCREAMING_SNAKE_CASE_ : int = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
SCREAMING_SNAKE_CASE_ : Tuple = int(scale_x * output_size[1] )
SCREAMING_SNAKE_CASE_ : str = int(scale_y * output_size[0] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i, index in enumerate(__A ):
SCREAMING_SNAKE_CASE_ : Tuple = all_img_list[index]
path_list.append(__A )
SCREAMING_SNAKE_CASE_ : List[str] = all_annos[index]
SCREAMING_SNAKE_CASE_ : Tuple = cva.imread(__A )
if i == 0: # top-left
SCREAMING_SNAKE_CASE_ : List[str] = cva.resize(__A , (divid_point_x, divid_point_y) )
SCREAMING_SNAKE_CASE_ : Dict = img
for bbox in img_annos:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = bbox[1] * scale_x
SCREAMING_SNAKE_CASE_ : List[Any] = bbox[2] * scale_y
SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[3] * scale_x
SCREAMING_SNAKE_CASE_ : Union[str, Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
SCREAMING_SNAKE_CASE_ : List[str] = cva.resize(__A , (output_size[1] - divid_point_x, divid_point_y) )
SCREAMING_SNAKE_CASE_ : int = img
for bbox in img_annos:
SCREAMING_SNAKE_CASE_ : Tuple = scale_x + bbox[1] * (1 - scale_x)
SCREAMING_SNAKE_CASE_ : Any = bbox[2] * scale_y
SCREAMING_SNAKE_CASE_ : Tuple = scale_x + bbox[3] * (1 - scale_x)
SCREAMING_SNAKE_CASE_ : str = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
SCREAMING_SNAKE_CASE_ : Any = cva.resize(__A , (divid_point_x, output_size[0] - divid_point_y) )
SCREAMING_SNAKE_CASE_ : Dict = img
for bbox in img_annos:
SCREAMING_SNAKE_CASE_ : List[Any] = bbox[1] * scale_x
SCREAMING_SNAKE_CASE_ : Tuple = scale_y + bbox[2] * (1 - scale_y)
SCREAMING_SNAKE_CASE_ : Any = bbox[3] * scale_x
SCREAMING_SNAKE_CASE_ : List[str] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
SCREAMING_SNAKE_CASE_ : List[str] = cva.resize(
__A , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
SCREAMING_SNAKE_CASE_ : Tuple = img
for bbox in img_annos:
SCREAMING_SNAKE_CASE_ : str = scale_x + bbox[1] * (1 - scale_x)
SCREAMING_SNAKE_CASE_ : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
SCREAMING_SNAKE_CASE_ : int = scale_x + bbox[3] * (1 - scale_x)
SCREAMING_SNAKE_CASE_ : int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
SCREAMING_SNAKE_CASE_ : str = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
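# Editor's check of the bbox remap above: annotations are normalized to [0, 1], so a box
# pasted into the top-right quadrant maps x -> scale_x + x * (1 - scale_x) while y is
# simply scaled by scale_y. For scale_x = 0.5, an xmin of 0.2 lands at 0.6:
assert abs((0.5 + 0.2 * (1 - 0.5)) - 0.6) < 1e-12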
def __UpperCAmelCase ( lowerCamelCase_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
assert number_char > 1, "The number of characters should be greater than 1"
SCREAMING_SNAKE_CASE_ : str = ascii_lowercase + digits
return "".join(random.choice(__A ) for _ in range(__A ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 701 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[int] = "visual_bert"
def __init__( self ,snake_case__=30522 ,snake_case__=768 ,snake_case__=512 ,snake_case__=12 ,snake_case__=12 ,snake_case__=3072 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=1E-12 ,snake_case__=False ,snake_case__=True ,snake_case__=1 ,snake_case__=0 ,snake_case__=2 ,**snake_case__ ,):
super().__init__(pad_token_id=snake_case__ ,bos_token_id=snake_case__ ,eos_token_id=snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = visual_embedding_dim
SCREAMING_SNAKE_CASE_ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : int = bypass_transformer
SCREAMING_SNAKE_CASE_ : Optional[Any] = special_visual_initialize
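# Editor's usage sketch: assuming this is transformers' VisualBertConfig, the defaults
# describe a BERT-base-sized text encoder paired with a 512-dim visual embedding space:
#     config = VisualBertConfig()   # class name hypothetical here (mangled above)
#     config.visual_embedding_dim   # -> 512
#     config.bypass_transformer     # -> False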
| 685 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : Optional[Any] = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class lowerCAmelCase_ ( snake_case__ ):
__a : Union[str, Any] = """mobilenet_v2"""
def __init__( self ,snake_case__=3 ,snake_case__=224 ,snake_case__=1.0 ,snake_case__=8 ,snake_case__=8 ,snake_case__=6 ,snake_case__=32 ,snake_case__=True ,snake_case__=True ,snake_case__="relu6" ,snake_case__=True ,snake_case__=0.8 ,snake_case__=0.02 ,snake_case__=0.001 ,snake_case__=255 ,**snake_case__ ,):
super().__init__(**UpperCAmelCase_ )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
SCREAMING_SNAKE_CASE_ : Dict = num_channels
SCREAMING_SNAKE_CASE_ : List[str] = image_size
SCREAMING_SNAKE_CASE_ : Tuple = depth_multiplier
SCREAMING_SNAKE_CASE_ : Dict = depth_divisible_by
SCREAMING_SNAKE_CASE_ : int = min_depth
SCREAMING_SNAKE_CASE_ : Union[str, Any] = expand_ratio
SCREAMING_SNAKE_CASE_ : Any = output_stride
SCREAMING_SNAKE_CASE_ : str = first_layer_is_expansion
SCREAMING_SNAKE_CASE_ : str = finegrained_output
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : Tuple = tf_padding
SCREAMING_SNAKE_CASE_ : Dict = classifier_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : int = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Any = semantic_loss_ignore_index
class lowerCAmelCase_ ( snake_case__ ):
__a : int = version.parse("1.11" )
@property
def snake_case ( self ):
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def snake_case ( self ):
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def snake_case ( self ):
return 1E-4
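# Editor's note: the ONNX config above declares one dynamic-batch image input and, for
# the image-classification task, a single `logits` output; 1E-4 is the tolerance used
# when validating the exported graph against the PyTorch model. Sketch (class names
# hypothetical, mangled above):
#     onnx_config = MobileNetV2OnnxConfig(MobileNetV2Config(), task="image-classification")
#     onnx_config.inputs    # OrderedDict([("pixel_values", {0: "batch"})])
#     onnx_config.outputs   # OrderedDict([("logits", {0: "batch"})])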
| 702 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Union[str, Any]:
"""simple docstring"""
def is_in_circle(lowerCamelCase_ : float , lowerCamelCase_ : float ) -> bool:
SCREAMING_SNAKE_CASE_ : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
SCREAMING_SNAKE_CASE_ : Optional[int] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase_ ) )
# The ratio of the area for circle to square is pi/4.
SCREAMING_SNAKE_CASE_ : Tuple = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Callable[[float], float] , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : float = 1.0 , ) -> float:
"""simple docstring"""
return mean(
function_to_integrate(uniform(lowerCamelCase_ , lowerCamelCase_ ) ) for _ in range(lowerCamelCase_ ) ) * (max_value - min_value)
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : float = 1.0 ) -> None:
"""simple docstring"""
def identity_function(lowerCamelCase_ : float ) -> float:
return x
SCREAMING_SNAKE_CASE_ : str = area_under_curve_estimator(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (max_value * max_value - min_value * min_value) / 2
print('******************' )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print('******************' )
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> None:
"""simple docstring"""
def function_to_integrate(lowerCamelCase_ : float ) -> float:
return sqrt(4.0 - x * x )
SCREAMING_SNAKE_CASE_ : Dict = area_under_curve_estimator(
lowerCamelCase_ , lowerCamelCase_ , 0.0 , 2.0 )
print('******************' )
print('Estimating pi using area_under_curve_estimator' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print('******************' )
if __name__ == "__main__":
import doctest
doctest.testmod()
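# Editor's convergence note: plain Monte Carlo error decays like O(1/sqrt(n)), so each
# extra digit of precision costs roughly 100x more samples. A quick sketch using the
# estimator defined above (the integrand's area from 0 to 2 is exactly pi):
#     for n in (10**3, 10**5, 10**7):
#         print(n, area_under_curve_estimator(n, lambda x: sqrt(4.0 - x * x), 0.0, 2.0))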
| 685 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(A_ )
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo' )
SCREAMING_SNAKE_CASE_ : str = dataset['train'][0]['image'].convert('RGB' )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processor(A_ ,return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Any = model(**A_ )
SCREAMING_SNAKE_CASE_ : List[str] = outputs.logits
SCREAMING_SNAKE_CASE_ : str = torch.Size((1, 16) )
self.assertEqual(logits.shape ,A_ )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(
[-0.4158, -0.4092, -0.4347] ,device=A_ ,dtype=torch.float ,)
self.assertTrue(torch.allclose(logits[0, :3] ,A_ ,atol=1E-4 ) )
| 703 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=7 ,snake_case__=3 ,snake_case__=18 ,snake_case__=30 ,snake_case__=400 ,snake_case__=True ,snake_case__=None ,snake_case__=True ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE_ : int = max_resolution
SCREAMING_SNAKE_CASE_ : Dict = do_resize
SCREAMING_SNAKE_CASE_ : Dict = size
SCREAMING_SNAKE_CASE_ : str = apply_ocr
def snake_case ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = LayoutLMvaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
self.assertTrue(hasattr(snake_case__ ,'apply_ocr' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,snake_case__ )
self.assertIsInstance(encoding.boxes ,snake_case__ )
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# with apply_OCR = True
SCREAMING_SNAKE_CASE_ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
SCREAMING_SNAKE_CASE_ : str = Image.open(ds[0]['file'] ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : Any = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
# with apply_OCR = False
SCREAMING_SNAKE_CASE_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
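# Editor's note: with apply_ocr=True the processor returns pixel_values plus Tesseract
# words and boxes (boxes in LayoutLM's 0-1000 normalized coordinate scheme, which is why
# every value above stays below 1000); with apply_ocr=False only pixel_values is returned.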
| 685 | 0 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class lowerCAmelCase_ :
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
return None
class lowerCAmelCase_ :
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
return None
class lowerCAmelCase_ ( unittest.TestCase ):
__a : str = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def snake_case ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCamelCase_ ,'tf' ,12 ,**UpperCamelCase_ )
@require_torch
@slow
def snake_case ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCamelCase_ ,'pt' ,12 ,**UpperCamelCase_ )
@require_torch
@slow
def snake_case ( self ):
from transformers import BertModel
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(UpperCamelCase_ ) )
vocab_file.flush()
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
SCREAMING_SNAKE_CASE_ : Any = BertModel(BertConfig(vocab_size=len(UpperCamelCase_ ) ) )
model.save_pretrained(UpperCamelCase_ )
self._test_export(UpperCamelCase_ ,'pt' ,12 ,UpperCamelCase_ )
@require_tf
@slow
def snake_case ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE_ : Dict = self._test_export(UpperCamelCase_ ,'tf' ,12 ,**UpperCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = quantize(Path(UpperCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def snake_case ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE_ : int = self._test_export(UpperCamelCase_ ,'pt' ,12 ,**UpperCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = quantize(UpperCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=None ,**snake_case__ ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
SCREAMING_SNAKE_CASE_ : int = Path(UpperCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,**UpperCamelCase_ )
return path
except Exception as e:
self.fail(UpperCamelCase_ )
@require_torch
@require_tokenizers
@slow
def snake_case ( self ):
from transformers import BertModel
SCREAMING_SNAKE_CASE_ : List[str] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
SCREAMING_SNAKE_CASE_ : List[str] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(UpperCamelCase_ ,UpperCamelCase_ ,'pt' )
@require_tf
@require_tokenizers
@slow
def snake_case ( self ):
from transformers import TFBertModel
SCREAMING_SNAKE_CASE_ : Any = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(UpperCamelCase_ ,UpperCamelCase_ ,'tf' )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = FeatureExtractionPipeline(UpperCamelCase_ ,UpperCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
SCREAMING_SNAKE_CASE_ : Dict = infer_shapes(UpperCamelCase_ ,UpperCamelCase_ )
# Assert all variables are present
self.assertEqual(len(UpperCamelCase_ ) ,len(UpperCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] ,UpperCamelCase_ )
self.assertSequenceEqual(variable_names[3:] ,UpperCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = ['input_ids', 'attention_mask', 'token_type_ids']
SCREAMING_SNAKE_CASE_ : str = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
SCREAMING_SNAKE_CASE_ : str = ensure_valid_input(FuncContiguousArgs() ,UpperCamelCase_ ,UpperCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(UpperCamelCase_ ) ,3 )
# Should have exactly the same input names
self.assertEqual(set(UpperCamelCase_ ) ,set(UpperCamelCase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(UpperCamelCase_ ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
SCREAMING_SNAKE_CASE_ : str = ensure_valid_input(FuncNonContiguousArgs() ,UpperCamelCase_ ,UpperCamelCase_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(UpperCamelCase_ ) ,1 )
self.assertEqual(len(UpperCamelCase_ ) ,1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] ,tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] ,'input_ids' )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
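# Editor's note on ensure_valid_input (exercised above): it reorders the tokenizer's
# outputs to match the model forward signature and stops at the first parameter the
# tokenizer did not produce, which is why the non-contiguous (GPT-2-style) case keeps
# only `input_ids`.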
| 704 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ : str = logging.getLogger(__name__)
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : str
__a : str
__a : Optional[str] = None
__a : Optional[str] = None
__a : Optional[str] = None
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : List[int]
__a : Optional[List[int]] = None
__a : Optional[List[int]] = None
__a : Optional[Union[int, float]] = None
__a : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
snake_case__ ,'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(snake_case__ ) ,snake_case__ ,) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : Dict = cached_features_file + '.lock'
with FileLock(snake_case__ ):
if os.path.exists(snake_case__ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(snake_case__ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
SCREAMING_SNAKE_CASE_ : List[Any] = (
processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
)
logger.info('Training examples: %s' ,len(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
logger.info('Saving features into cached file %s' ,snake_case__ )
torch.save(self.features ,snake_case__ )
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ :
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 128 ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list
SCREAMING_SNAKE_CASE_ : int = processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(snake_case__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
SCREAMING_SNAKE_CASE_ : List[Any] = tf.data.Dataset.from_generator(
snake_case__ ,(
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_train_set.txt' ) ) ,'train' )
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
def snake_case ( self ):
return ["contradiction", "entailment", "neutral"]
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i, line in enumerate(snake_case__ ):
if i == 0:
continue
SCREAMING_SNAKE_CASE_ : List[str] = '%s-%s' % (set_type, line[0])
SCREAMING_SNAKE_CASE_ : Dict = line[5]
SCREAMING_SNAKE_CASE_ : Dict = line[6]
SCREAMING_SNAKE_CASE_ : Tuple = line[7][2:] if line[7].startswith('ex' ) else line[7]
SCREAMING_SNAKE_CASE_ : Optional[int] = line[0]
examples.append(InputExample(guid=snake_case__ ,text_a=snake_case__ ,text_b=snake_case__ ,label=snake_case__ ,pairID=snake_case__ ) )
return examples
def __UpperCAmelCase ( lowerCamelCase_ : List[InputExample] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : PreTrainedTokenizer , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase_ )}
SCREAMING_SNAKE_CASE_ : Dict = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase_ ) , desc='convert examples to features' ):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d' % (ex_index) )
SCREAMING_SNAKE_CASE_ : Any = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' , truncation=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = label_map[example.label] if example.label in label_map else 0
SCREAMING_SNAKE_CASE_ : List[str] = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase_ , label=lowerCamelCase_ , pairID=lowerCamelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
UpperCamelCase__ : str = {
'''hans''': 3,
}
UpperCamelCase__ : Dict = {
'''hans''': HansProcessor,
}
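# Editor's usage sketch (tokenizer hypothetical): converting a single HANS example with
# the helpers defined above.
#     example = InputExample(guid="train-1", text_a="The doctor saw the lawyer.",
#                            text_b="The lawyer saw the doctor.", label="entailment", pairID="1")
#     features = hans_convert_examples_to_features(
#         [example], ["contradiction", "entailment", "neutral"], 128, tokenizer
#     )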
| 685 | 0 |
from math import factorial
UpperCamelCase__ : Tuple = {str(d): factorial(d) for d in range(10)}
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Dict:
"""simple docstring"""
return sum(DIGIT_FACTORIAL[d] for d in str(_A ) )
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , _A ) if sum_of_digit_factorial(_A ) == i )
if __name__ == "__main__":
print(F"""{solution() = }""")
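# Editor's worked check: 145 = 1! + 4! + 5! = 1 + 24 + 120 is a digit-factorial fixed
# point; 40585 is the only other one, so solution() returns 40730. The 7 * 9! + 1 bound
# suffices because an 8-digit number can reach at most 8 * 9! = 2903040 (7 digits).
assert sum_of_digit_factorial(145) == 145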
| 705 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( dataset_size : int , input_in_memory_max_size : int , monkeypatch : pytest.MonkeyPatch ) -> None:
    """simple docstring"""
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
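# Intuition for the parametrize grid: a dataset counts as "small" only when both
# sizes are truthy and dataset_size < IN_MEMORY_MAX_SIZE; the default cap of 0
# disables the in-memory path entirely.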
| 685 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Dict = SamImageProcessor()
SCREAMING_SNAKE_CASE_ : List[Any] = SamProcessor(UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def snake_case ( self ,**snake_case__ ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ).image_processor
def snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ : int = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : int = SamProcessor(image_processor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processor(UpperCAmelCase_ ,return_tensors='np' )
SCREAMING_SNAKE_CASE_ : str = processor(images=UpperCAmelCase_ ,return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
@require_torch
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SamProcessor(image_processor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ : str = [torch.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE_ : List[Any] = [[1764, 2646]]
SCREAMING_SNAKE_CASE_ : Optional[int] = [[683, 1024]]
SCREAMING_SNAKE_CASE_ : int = processor.post_process_masks(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
SCREAMING_SNAKE_CASE_ : Tuple = processor.post_process_masks(
UpperCAmelCase_ ,torch.tensor(UpperCAmelCase_ ) ,torch.tensor(UpperCAmelCase_ ) )
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
# should also work with np
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [np.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.post_process_masks(UpperCAmelCase_ ,np.array(UpperCAmelCase_ ) ,np.array(UpperCAmelCase_ ) )
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [[1, 0], [0, 1]]
with self.assertRaises(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE_ : List[Any] = processor.post_process_masks(UpperCAmelCase_ ,np.array(UpperCAmelCase_ ) ,np.array(UpperCAmelCase_ ) )
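    # post_process_masks rescales low-resolution mask logits back to each image's
    # original size, which is why the (1, 3, 5, 5) stubs above come back as
    # (1, 3, 1764, 2646); malformed size inputs are expected to raise.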
@require_vision
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Tuple = SamImageProcessor()
SCREAMING_SNAKE_CASE_ : List[Any] = SamProcessor(UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def snake_case ( self ,**snake_case__ ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ).image_processor
def snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : List[str] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Optional[Any] = SamProcessor(image_processor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processor(UpperCAmelCase_ ,return_tensors='np' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(images=UpperCAmelCase_ ,return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
@require_tf
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Optional[int] = SamProcessor(image_processor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [tf.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE_ : Optional[int] = [[1764, 2646]]
SCREAMING_SNAKE_CASE_ : Optional[int] = [[683, 1024]]
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor.post_process_masks(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,return_tensors='tf' )
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
SCREAMING_SNAKE_CASE_ : int = processor.post_process_masks(
UpperCAmelCase_ ,tf.convert_to_tensor(UpperCAmelCase_ ) ,tf.convert_to_tensor(UpperCAmelCase_ ) ,return_tensors='tf' ,)
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
# should also work with np
SCREAMING_SNAKE_CASE_ : List[Any] = [np.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE_ : int = processor.post_process_masks(
UpperCAmelCase_ ,np.array(UpperCAmelCase_ ) ,np.array(UpperCAmelCase_ ) ,return_tensors='tf' )
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
SCREAMING_SNAKE_CASE_ : List[Any] = processor.post_process_masks(
UpperCAmelCase_ ,np.array(UpperCAmelCase_ ) ,np.array(UpperCAmelCase_ ) ,return_tensors='tf' )
@require_vision
@require_torchvision
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SamImageProcessor()
SCREAMING_SNAKE_CASE_ : List[Any] = SamProcessor(UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def snake_case ( self ,**snake_case__ ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ).image_processor
def snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ : int = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : int = SamProcessor(image_processor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ : str = np.random.randint(0 ,2 ,size=(1, 3, 5, 5) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ : List[Any] = [tf.convert_to_tensor(UpperCAmelCase_ )]
SCREAMING_SNAKE_CASE_ : str = [torch.tensor(UpperCAmelCase_ )]
SCREAMING_SNAKE_CASE_ : List[Any] = [[1764, 2646]]
SCREAMING_SNAKE_CASE_ : List[Any] = [[683, 1024]]
SCREAMING_SNAKE_CASE_ : Any = processor.post_process_masks(
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,return_tensors='tf' )
SCREAMING_SNAKE_CASE_ : Tuple = processor.post_process_masks(
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Any = SamProcessor(image_processor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : int = image_processor(UpperCAmelCase_ ,return_tensors='pt' )['pixel_values'].numpy()
SCREAMING_SNAKE_CASE_ : Tuple = processor(images=UpperCAmelCase_ ,return_tensors='pt' )['pixel_values'].numpy()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processor(UpperCAmelCase_ ,return_tensors='tf' )['pixel_values'].numpy()
SCREAMING_SNAKE_CASE_ : Dict = processor(images=UpperCAmelCase_ ,return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(UpperCAmelCase_ ,UpperCAmelCase_ ) )
self.assertTrue(np.allclose(UpperCAmelCase_ ,UpperCAmelCase_ ) )
self.assertTrue(np.allclose(UpperCAmelCase_ ,UpperCAmelCase_ ) )
| 706 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_00 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( donor_conc : float , acceptor_conc : float , intrinsic_conc : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
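    # Rough silicon example, assuming n_i of about 1e10 cm^-3 at 300 K: with
    # N_d = N_a = 1e17 cm^-3 the formula gives kT/q * ln(1e34 / 1e20), roughly 0.83 V.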
| 685 | 0 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
UpperCamelCase__ : Tuple = "bart"
UpperCamelCase__ : Optional[Any] = True
@st.cache(allow_output_mutation=lowerCamelCase_ )
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
SCREAMING_SNAKE_CASE_ : List[Any] = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
SCREAMING_SNAKE_CASE_ : Dict = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = qar_model.eval()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = (None, None)
if MODEL_TYPE == "bart":
SCREAMING_SNAKE_CASE_ : List[str] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
SCREAMING_SNAKE_CASE_ : str = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sas_model.eval()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=lowerCamelCase_ )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
SCREAMING_SNAKE_CASE_ : Any = faiss.StandardGpuResources()
SCREAMING_SNAKE_CASE_ : Any = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 1_28) , )
SCREAMING_SNAKE_CASE_ : List[str] = faiss.IndexFlatIP(1_28 )
SCREAMING_SNAKE_CASE_ : Tuple = faiss.index_cpu_to_gpu(lowerCamelCase_ , 1 , lowerCamelCase_ )
wikiaab_gpu_index_flat.add(lowerCamelCase_ ) # TODO fix for larger GPU
else:
SCREAMING_SNAKE_CASE_ : Any = (None, None)
SCREAMING_SNAKE_CASE_ : Any = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=lowerCamelCase_ )
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = datasets.load_dataset('eli5' , name='LFQA_reddit' )
SCREAMING_SNAKE_CASE_ : Tuple = elia['train_eli5']
SCREAMING_SNAKE_CASE_ : List[Any] = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 1_28) )
SCREAMING_SNAKE_CASE_ : Optional[int] = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(lowerCamelCase_ )
return (elia_train, eli5_train_q_index)
UpperCamelCase__ : List[str] = load_indexes()
UpperCamelCase__ : List[str] = load_models()
UpperCamelCase__ : List[str] = load_train_data()
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any=10 ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = embed_questions_for_retrieval([question] , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = eli5_train_q_index.search(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = [elia_train[int(lowerCamelCase_ )] for i in I[0]]
return nn_examples
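# Sketch of the dense lookup above: the question is embedded, searched against
# the FAISS inner-product index of ELI5 training-question embeddings, and the
# matching training examples are returned for display.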
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : int="wiki40b" , lowerCamelCase_ : Optional[int]="dense" , lowerCamelCase_ : int=10 ) -> Dict:
"""simple docstring"""
if source == "none":
SCREAMING_SNAKE_CASE_ : Dict = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
SCREAMING_SNAKE_CASE_ : List[str] = query_qa_dense_index(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE_ : Any = query_es_index(
lowerCamelCase_ , lowerCamelCase_ , index_name='english_wiki40b_snippets_100w' , n_results=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
SCREAMING_SNAKE_CASE_ : Tuple = 'question: {} context: {}'.format(lowerCamelCase_ , lowerCamelCase_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda lowerCamelCase_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda lowerCamelCase_ : None),
} )
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any]=64 , lowerCamelCase_ : str=2_56 , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Optional[int]=0.9_5 , lowerCamelCase_ : Tuple=0.8 ) -> Optional[Any]:
"""simple docstring"""
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = qa_sas_generate(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , num_answers=1 , num_beams=lowerCamelCase_ , min_len=lowerCamelCase_ , max_len=lowerCamelCase_ , do_sample=lowerCamelCase_ , temp=lowerCamelCase_ , top_p=lowerCamelCase_ , top_k=lowerCamelCase_ , max_input_length=10_24 , device='cuda:0' , )[0]
return (answer, support_list)
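# qa_sas_generate consumes the "question: ... context: ..." string assembled by
# make_support and decodes a single answer; num_answers=1 keeps only the top
# candidate, and [0] unwraps it from the returned list.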
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
UpperCamelCase__ : int = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
UpperCamelCase__ : int = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCamelCase__ : Dict = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCamelCase__ : List[str] = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
UpperCamelCase__ : Dict = st.sidebar.checkbox('''Demo options''')
if demo_options:
UpperCamelCase__ : Optional[Any] = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
UpperCamelCase__ : Optional[Any] = action_list.index(action_st)
UpperCamelCase__ : str = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
UpperCamelCase__ : List[Any] = show_type == "Show full text of passages"
else:
UpperCamelCase__ : Any = 3
UpperCamelCase__ : str = True
UpperCamelCase__ : Optional[Any] = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
UpperCamelCase__ : List[str] = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
UpperCamelCase__ : Dict = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
UpperCamelCase__ : Any = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
UpperCamelCase__ : Dict = "wiki40b"
UpperCamelCase__ : List[str] = "dense"
UpperCamelCase__ : int = "beam"
UpperCamelCase__ : Optional[int] = 2
UpperCamelCase__ : Dict = 64
UpperCamelCase__ : Dict = 2_56
UpperCamelCase__ : str = None
UpperCamelCase__ : Dict = None
UpperCamelCase__ : Optional[int] = st.sidebar.checkbox('''Generation options''')
if generate_options:
UpperCamelCase__ : str = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
UpperCamelCase__ : Optional[Any] = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
UpperCamelCase__ : Dict = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
UpperCamelCase__ : Union[str, Any] = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
UpperCamelCase__ : Optional[Any] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCamelCase__ : int = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCamelCase__ : Optional[Any] = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCamelCase__ : Union[str, Any] = None
# start main text
UpperCamelCase__ : Tuple = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
UpperCamelCase__ : str = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCamelCase__ : Any = st.text_input('''Enter your question here:''', '''''')
else:
UpperCamelCase__ : Any = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCamelCase__ : Optional[int] = make_support(question, source=wiki_source, method='''dense''', n_results=10)
UpperCamelCase__ : List[Any] = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
UpperCamelCase__ : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCamelCase__ : int = support_list[:10]
UpperCamelCase__ : Optional[int] = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
UpperCamelCase__ : Dict = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCamelCase__ : List[str] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
UpperCamelCase__ : Optional[int] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(''' ''', '''_'''))
UpperCamelCase__ : List[Any] = res[1].strip()
if sec_titles == "":
UpperCamelCase__ : Tuple = "[{}]({})".format(res[0], wiki_url)
else:
UpperCamelCase__ : int = sec_titles.split(''' & ''')
UpperCamelCase__ : Any = " & ".join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style=\"font-family:arial; font-size:10pt;\">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
UpperCamelCase__ : List[Any] = find_nearest_training(question)
UpperCamelCase__ : Union[str, Any] = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
UpperCamelCase__ : List[Any] = [
"{}. {}".format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
UpperCamelCase__ : Tuple = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 707 |
class OverFlowError ( Exception ):
    pass
class UnderFlowError ( Exception ):
    pass
class FixedPriorityQueue :
    def __init__( self ):
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue ( self ,priority ,data ):
        try:
            if len(self.queues[priority] ) >= 100:
                raise OverflowError('Maximum queue size is 100' )
            self.queues[priority].append(data )
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2' )
    def dequeue ( self ):
        for queue in self.queues:
            if queue:
                return queue.pop(0 )
        raise UnderFlowError('All queues are empty' )
    def __str__( self ):
        return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class ElementPriorityQueue :
    def __init__( self ):
        self.queue = []
    def enqueue ( self ,data ):
        if len(self.queue ) == 100:
            raise OverFlowError('Maximum queue size is 100' )
        self.queue.append(data )
    def dequeue ( self ):
        if not self.queue:
            raise UnderFlowError('The queue is empty' )
        else:
            data = min(self.queue )
            self.queue.remove(data )
            return data
def __str__( self ):
return str(self.queue )
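# Design note: FixedPriorityQueue keeps one FIFO list per priority level (0 is
# highest) and always serves the first non-empty level, while ElementPriorityQueue
# treats the element value itself as the priority and dequeues the minimum.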
def fixed_priority_queue ( ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue ( ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 0 |
def heaps ( arr : list ) -> list:
    """simple docstring"""
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate(n : int , arr : list ):
        c = [0] * n
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1
    generate(len(arr ) , arr )
    return res
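# For example, heaps([1, 2, 3]) yields all 3! = 6 orderings:
# (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1).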
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    print(heaps(arr))
| 708 |
def solution ( lowerCamelCase_ : int = 10_00 ) -> int:
"""simple docstring"""
return sum(e for e in range(3 , lowerCamelCase_ ) if e % 3 == 0 or e % 5 == 0 )
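# For example, solution(10) counts the multiples 3, 5, 6 and 9 and returns 23.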
if __name__ == "__main__":
print(F"""{solution() = }""")
| 685 | 0 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ : List[Any] = logging.getLogger(__name__)
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : Any = 42
__a : str = 42
__a : Optional[int] = None
__a : Any = None
__a : List[str] = None
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : str = 42
__a : Any = None
__a : Optional[int] = None
__a : Any = None
__a : Any = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[Any] = 42
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : List[str] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : str = os.path.join(
_lowerCAmelCase ,'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(_lowerCAmelCase ) ,_lowerCAmelCase ,) ,)
SCREAMING_SNAKE_CASE_ : Any = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : List[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : int = cached_features_file + '.lock'
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
SCREAMING_SNAKE_CASE_ : str = torch.load(_lowerCAmelCase )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
SCREAMING_SNAKE_CASE_ : Dict = (
processor.get_dev_examples(_lowerCAmelCase ) if evaluate else processor.get_train_examples(_lowerCAmelCase )
)
logger.info('Training examples: %s' ,len(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : List[str] = hans_convert_examples_to_features(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
logger.info('Saving features into cached file %s' ,_lowerCAmelCase )
torch.save(self.features ,_lowerCAmelCase )
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ :
__a : str = 42
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 128 ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : str = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : Tuple = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Tuple = label_list
SCREAMING_SNAKE_CASE_ : Any = processor.get_dev_examples(_lowerCAmelCase ) if evaluate else processor.get_train_examples(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = hans_convert_examples_to_features(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(_lowerCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
SCREAMING_SNAKE_CASE_ : Tuple = tf.data.Dataset.from_generator(
_lowerCAmelCase ,(
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(_lowerCAmelCase ,'heuristics_train_set.txt' ) ) ,'train' )
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(_lowerCAmelCase ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
def snake_case ( self ):
return ["contradiction", "entailment", "neutral"]
    def _create_examples ( self ,lines ,set_type ):
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = '%s-%s' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith('ex' ) else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid ,text_a=text_a ,text_b=text_b ,label=label ,pairID=pairID ) )
        return examples
def __UpperCAmelCase ( examples : List[InputExample] , label_list : List[str] , max_length : int , tokenizer : PreTrainedTokenizer , ) -> Union[str, Any]:
    """simple docstring"""
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='convert examples to features' ):
        if ex_index % 1_00_00 == 0:
            logger.info('Writing example %d' % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='max_length' , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
UpperCamelCase__ : List[Any] = {
'hans': 3,
}
UpperCamelCase__ : Tuple = {
'hans': HansProcessor,
}
| 709 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
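# Placeholder pattern: each dummy class only calls requires_backends, so any
# attempt to instantiate it (or call its classmethods) without Flax installed
# raises an error that names the missing `flax` dependency.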
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
| 685 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
UpperCamelCase__ : Union[str, Any] = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
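# Substring mapping from original segment-anything checkpoint keys to their
# Transformers names; the key-renaming helper below applies each substitution to
# every state-dict key before loading.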
def __UpperCAmelCase ( lowerCamelCase_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
state_dict.pop('pixel_mean' , lowerCamelCase__ )
state_dict.pop('pixel_std' , lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
SCREAMING_SNAKE_CASE_ : str = key.replace(lowerCamelCase__ , lowerCamelCase__ )
if re.match(lowerCamelCase__ , lowerCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Tuple = int(re.match(lowerCamelCase__ , lowerCamelCase__ ).group(2 ) )
if layer_nb == 0:
SCREAMING_SNAKE_CASE_ : List[str] = key.replace('layers.0' , 'proj_in' )
elif layer_nb == 1:
SCREAMING_SNAKE_CASE_ : int = key.replace('layers.1' , 'layers.0' )
elif layer_nb == 2:
SCREAMING_SNAKE_CASE_ : List[str] = key.replace('layers.2' , 'proj_out' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = value
SCREAMING_SNAKE_CASE_ : str = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any]="ybelkada/segment-anything" ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = hf_hub_download(lowerCamelCase__ , F'checkpoints/{model_name}.pth' )
if "sam_vit_b" in model_name:
SCREAMING_SNAKE_CASE_ : Optional[int] = SamConfig()
elif "sam_vit_l" in model_name:
SCREAMING_SNAKE_CASE_ : Dict = SamVisionConfig(
hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
SCREAMING_SNAKE_CASE_ : Optional[int] = SamConfig(
vision_config=lowerCamelCase__ , )
elif "sam_vit_h" in model_name:
SCREAMING_SNAKE_CASE_ : List[str] = SamVisionConfig(
hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
SCREAMING_SNAKE_CASE_ : Any = SamConfig(
vision_config=lowerCamelCase__ , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.load(lowerCamelCase__ , map_location='cpu' )
SCREAMING_SNAKE_CASE_ : int = replace_keys(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ : int = SamImageProcessor()
SCREAMING_SNAKE_CASE_ : Any = SamProcessor(image_processor=lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ : str = SamModel(lowerCamelCase__ )
hf_model.load_state_dict(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = hf_model.to('cuda' )
SCREAMING_SNAKE_CASE_ : List[Any] = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
SCREAMING_SNAKE_CASE_ : Tuple = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : List[str] = [[[4_00, 6_50]]]
SCREAMING_SNAKE_CASE_ : Any = [[1]]
SCREAMING_SNAKE_CASE_ : Any = processor(images=np.array(lowerCamelCase__ ) , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = hf_model(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ : int = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8
SCREAMING_SNAKE_CASE_ : Tuple = processor(
images=np.array(lowerCamelCase__ ) , input_points=lowerCamelCase__ , input_labels=lowerCamelCase__ , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hf_model(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4
SCREAMING_SNAKE_CASE_ : Any = ((75, 2_75, 17_25, 8_50),)
SCREAMING_SNAKE_CASE_ : Any = processor(images=np.array(lowerCamelCase__ ) , input_boxes=lowerCamelCase__ , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Dict = hf_model(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4
# Test with 2 points and 1 image.
SCREAMING_SNAKE_CASE_ : str = [[[4_00, 6_50], [8_00, 6_50]]]
SCREAMING_SNAKE_CASE_ : Tuple = [[1, 1]]
SCREAMING_SNAKE_CASE_ : str = processor(
images=np.array(lowerCamelCase__ ) , input_points=lowerCamelCase__ , input_labels=lowerCamelCase__ , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = hf_model(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ : str = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2
if __name__ == "__main__":
UpperCamelCase__ : List[Any] = argparse.ArgumentParser()
UpperCamelCase__ : List[str] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
UpperCamelCase__ : Dict = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 710 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCamelCase__ : Union[str, Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
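# A minimal pipe-delimited tabulate format: no rule lines, just `|`-separated
# header and data rows, which keeps the failure tables compact when the report
# is posted to Slack below.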
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Any = []
UpperCamelCase__ : Optional[Any] = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
UpperCamelCase__ : Any = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'''emoji''': True,
},
}
]
UpperCamelCase__ : Union[str, Any] = 0
for log in Path().glob('''*.log'''):
UpperCamelCase__ : Optional[int] = 0
with open(log, '''r''') as f:
for line in f:
UpperCamelCase__ : Any = json.loads(line)
if line.get('''nodeid''', '''''') != "":
UpperCamelCase__ : Tuple = line['''nodeid''']
if line.get('''duration''', None) is not None:
UpperCamelCase__ : List[Any] = F"""{line["duration"]:.4f}"""
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCamelCase__ : Tuple = []
log.unlink()
UpperCamelCase__ : List[Any] = ''''''
UpperCamelCase__ : List[str] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Optional[int] = {}
for test in failed_tests:
UpperCamelCase__ : str = test[0].split('''::''')
UpperCamelCase__ : List[Any] = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
UpperCamelCase__ : int = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCamelCase__ : str = [test[0] for test in failed_table]
UpperCamelCase__ : Union[str, Any] = list(set(files))
# Count number of instances in failed_tests
UpperCamelCase__ : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCamelCase__ : str = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
UpperCamelCase__ : List[Any] = '''Too many failed tests, please see the full report in the Action results.'''
UpperCamelCase__ : Optional[Any] = len(err) + 10
UpperCamelCase__ : List[str] = message[: 30_00 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
UpperCamelCase__ : Optional[Any] = '''No failed tests! 🤗'''
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
UpperCamelCase__ : int = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCamelCase__ : Optional[Any] = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCamelCase__ : Tuple = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
UpperCamelCase__ : Any = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCamelCase__ : int = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCamelCase__ : str = row[0]
else:
UpperCamelCase__ : str = ''''''
UpperCamelCase__ : Optional[Any] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 685 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Tuple ) -> Optional[int]:
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
SCREAMING_SNAKE_CASE_ : List[str] = flax_key_tuple[:-1] + ("weight",)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
SCREAMING_SNAKE_CASE_ : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
SCREAMING_SNAKE_CASE_ : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
SCREAMING_SNAKE_CASE_ : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any ) -> int:
"""simple docstring"""
if "metadata" in layer:
SCREAMING_SNAKE_CASE_ : Optional[int] = layer.split('metadata' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "".join(split_layer[0] )[:-1]
SCREAMING_SNAKE_CASE_ : Dict = [tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
SCREAMING_SNAKE_CASE_ : List[str] = layer.split('kvstore' )
SCREAMING_SNAKE_CASE_ : Optional[int] = "".join(split_layer[0] )[:-1]
SCREAMING_SNAKE_CASE_ : List[Any] = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
SCREAMING_SNAKE_CASE_ : Tuple = layer.split('/' )
SCREAMING_SNAKE_CASE_ : int = "/".join(split_layer[:-1] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
SCREAMING_SNAKE_CASE_ : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
SCREAMING_SNAKE_CASE_ : Optional[int] = "file"
else:
SCREAMING_SNAKE_CASE_ : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = rename_keys(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = {}
for k, v in current_block.items():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = v
SCREAMING_SNAKE_CASE_ : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : str = WEIGHTS_NAME ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = convert_file_size_to_int(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : Dict = {}
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
SCREAMING_SNAKE_CASE_ : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
SCREAMING_SNAKE_CASE_ : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep='/' )
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
for layer in checkpoint_info.keys():
SCREAMING_SNAKE_CASE_ : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
SCREAMING_SNAKE_CASE_ : Optional[int] = content
else:
SCREAMING_SNAKE_CASE_ : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
SCREAMING_SNAKE_CASE_ : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
SCREAMING_SNAKE_CASE_ : int = torch.tensor(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
SCREAMING_SNAKE_CASE_ : Optional[Any] = rename_base_flax_keys(tuple(key.split('/' ) ) , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace('.bin' , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace('.bin' , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE_ : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Dict = weights_name.replace(
            '.bin' , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' )  # here len(...) is len(sharded_state_dicts)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace('.bin' , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : Tuple = shard
for key in shard:
SCREAMING_SNAKE_CASE_ : str = shard_file
# Add the metadata
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"total_size": total_size}
SCREAMING_SNAKE_CASE_ : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , 'w' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
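# Illustrative shape of the return value (hypothetical keys, two shards assumed):
#   metadata   -> {"total_size": 21463048192}
#   weight_map -> {"encoder/block/0/layer/0/...": "pytorch_model-00001-of-00002.bin",
#                  "decoder/block/0/layer/0/...": "pytorch_model-00002-of-00002.bin"}
# i.e. every renamed parameter key is mapped to the shard file that stores it.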
if __name__ == "__main__":
UpperCamelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
UpperCamelCase__ : Dict = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
SCREAMING_SNAKE_CASE_ : int = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
SCREAMING_SNAKE_CASE_ : List[str] = TaTokenizer.from_pretrained('t5-small' )
SCREAMING_SNAKE_CASE_ : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors='pt' ).input_ids
SCREAMING_SNAKE_CASE_ : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 711 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
    if not isinstance(lowerCamelCase_ , int ):
raise TypeError('Input value must be an \'int\' type' )
SCREAMING_SNAKE_CASE_ : Tuple = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
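# Added illustration (assumes the argument is bound to `number` as intended):
#   >>> __UpperCAmelCase(32)   # 0b100000 -> the highest set bit sits at position 6
#   6
#   >>> __UpperCAmelCase(1)
#   1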
| 685 | 0 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ):
__a : Optional[int] = """pixel_values"""
__a : Optional[Any] = False
__a : str = TimmBackboneConfig
def __init__( self ,snake_case__ ,**snake_case__ ):
requires_backends(self ,'timm' )
super().__init__(_a )
SCREAMING_SNAKE_CASE_ : List[str] = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
if hasattr(_a ,'out_features' ) and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
SCREAMING_SNAKE_CASE_ : Any = getattr(_a ,'use_pretrained_backbone' ,_a )
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
# We just take the final layer by default. This matches the default for the transformers models.
SCREAMING_SNAKE_CASE_ : List[str] = config.out_indices if getattr(_a ,'out_indices' ,_a ) is not None else (-1,)
SCREAMING_SNAKE_CASE_ : Tuple = timm.create_model(
config.backbone ,pretrained=_a ,features_only=config.features_only ,in_chans=config.num_channels ,out_indices=_a ,**_a ,)
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
SCREAMING_SNAKE_CASE_ : Dict = self._backbone.return_layers
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = {layer['module']: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(_a )
@classmethod
def snake_case ( cls ,snake_case__ ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['vision', 'timm'] )
from ...models.timm_backbone import TimmBackboneConfig
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('config' ,TimmBackboneConfig() )
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('use_timm_backbone' ,_a )
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones' )
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('num_channels' ,config.num_channels )
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('features_only' ,config.features_only )
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('use_pretrained_backbone' ,config.use_pretrained_backbone )
SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop('out_indices' ,config.out_indices )
SCREAMING_SNAKE_CASE_ : List[str] = TimmBackboneConfig(
backbone=_a ,num_channels=_a ,features_only=_a ,use_pretrained_backbone=_a ,out_indices=_a ,)
return super()._from_config(_a ,**_a )
def snake_case ( self ,snake_case__ ):
pass
def snake_case ( self ,snake_case__ ,snake_case__=None ,snake_case__=None ,snake_case__=None ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ : List[Any] = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
SCREAMING_SNAKE_CASE_ : List[Any] = self._all_layers
SCREAMING_SNAKE_CASE_ : Dict = self._backbone(_a ,**_a )
SCREAMING_SNAKE_CASE_ : Tuple = self._return_layers
SCREAMING_SNAKE_CASE_ : Optional[Any] = tuple(hidden_states[i] for i in self.out_indices )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._backbone(_a ,**_a )
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : str = tuple(_a )
SCREAMING_SNAKE_CASE_ : Dict = tuple(_a ) if hidden_states is not None else None
if not return_dict:
SCREAMING_SNAKE_CASE_ : Any = (feature_maps,)
if output_hidden_states:
SCREAMING_SNAKE_CASE_ : Any = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=_a ,hidden_states=_a ,attentions=_a )
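# Hypothetical usage sketch (assumes torch and timm are installed; names as in this file):
#   config = TimmBackboneConfig(backbone='resnet18', out_indices=(1, 2, 3, 4))
#   backbone = lowerCAmelCase_(config)
#   feature_maps = backbone(pixel_values).feature_maps  # one tensor per requested stage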
| 712 |
import qiskit
def __UpperCAmelCase ( lowerCamelCase_ : int = 2 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = qubits
# Using Aer's simulator
SCREAMING_SNAKE_CASE_ : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
# Creating a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE_ : str = qiskit.QuantumCircuit(lowerCamelCase_ , lowerCamelCase_ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , lowerCamelCase_ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , lowerCamelCase_ )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(lowerCamelCase_ ) ) , list(range(lowerCamelCase_ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
SCREAMING_SNAKE_CASE_ : Tuple = qiskit.execute(lowerCamelCase_ , lowerCamelCase_ , shots=10_00 )
return job.result().get_counts(lowerCamelCase_ )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 685 | 0 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Any=7 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = None
if token is not None:
SCREAMING_SNAKE_CASE_ : Tuple = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
# The id of a workflow (not of a workflow run)
SCREAMING_SNAKE_CASE_ : int = '636036'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
SCREAMING_SNAKE_CASE_ : Optional[int] = requests.get(__UpperCAmelCase , headers=__UpperCAmelCase ).json()
return result["workflow_runs"]
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = get_daily_ci_runs(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
SCREAMING_SNAKE_CASE_ : Any = workflow_run['id']
break
return workflow_run_id
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Dict ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_last_daily_ci_runs(__UpperCAmelCase )
if workflow_run_id is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = get_artifacts_links(worflow_run_id=__UpperCAmelCase , token=__UpperCAmelCase )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
SCREAMING_SNAKE_CASE_ : Any = artifacts_links[artifact_name]
download_artifact(
artifact_name=__UpperCAmelCase , artifact_url=__UpperCAmelCase , output_dir=__UpperCAmelCase , token=__UpperCAmelCase )
def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ) -> str:
"""simple docstring"""
get_last_daily_ci_artifacts(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : str = {}
for artifact_name in artifact_names:
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(__UpperCAmelCase , F'{artifact_name}.zip' )
if os.path.isfile(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = {}
with zipfile.ZipFile(__UpperCAmelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(__UpperCAmelCase ):
# read the file
with z.open(__UpperCAmelCase ) as f:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = f.read().decode('UTF-8' )
return results
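# Hypothetical invocation sketch for the last helper above (artifact name and
# token are made up):
#   reports = __UpperCAmelCase(['ci_results'], './artifacts', '<GH_TOKEN>')
#   # -> {'ci_results': {'summary.txt': '...'}}, one entry per extracted file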
| 713 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> bool:
"""simple docstring"""
    if not isinstance(lowerCamelCase_ , int ):
raise ValueError('check_bouncy() accepts only integer arguments' )
SCREAMING_SNAKE_CASE_ : Optional[int] = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = ''.join(sorted(lowerCamelCase_ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __UpperCAmelCase ( lowerCamelCase_ : float = 99 ) -> int:
"""simple docstring"""
if not 0 < percent < 1_00:
raise ValueError('solution() only accepts values from 0 to 100' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Dict = 1
while True:
if check_bouncy(lowerCamelCase_ ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 685 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : str = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[Any] = "funnel"
__a : int = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self ,snake_case__=30522 ,snake_case__=[4, 4, 4] ,snake_case__=None ,snake_case__=2 ,snake_case__=768 ,snake_case__=12 ,snake_case__=64 ,snake_case__=3072 ,snake_case__="gelu_new" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=0.0 ,snake_case__=0.1 ,snake_case__=None ,snake_case__=1E-9 ,snake_case__="mean" ,snake_case__="relative_shift" ,snake_case__=True ,snake_case__=True ,snake_case__=True ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : int = block_sizes
SCREAMING_SNAKE_CASE_ : Dict = [1] * len(_UpperCAmelCase ) if block_repeats is None else block_repeats
assert len(_UpperCAmelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
SCREAMING_SNAKE_CASE_ : List[str] = num_decoder_layers
SCREAMING_SNAKE_CASE_ : Dict = d_model
SCREAMING_SNAKE_CASE_ : Any = n_head
SCREAMING_SNAKE_CASE_ : Union[str, Any] = d_head
SCREAMING_SNAKE_CASE_ : Tuple = d_inner
SCREAMING_SNAKE_CASE_ : int = hidden_act
SCREAMING_SNAKE_CASE_ : Tuple = hidden_dropout
SCREAMING_SNAKE_CASE_ : Dict = attention_dropout
SCREAMING_SNAKE_CASE_ : int = activation_dropout
SCREAMING_SNAKE_CASE_ : str = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = initializer_std
SCREAMING_SNAKE_CASE_ : str = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F'Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'
SCREAMING_SNAKE_CASE_ : str = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F'Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'
SCREAMING_SNAKE_CASE_ : Tuple = attention_type
SCREAMING_SNAKE_CASE_ : int = separate_cls
SCREAMING_SNAKE_CASE_ : List[str] = truncate_seq
SCREAMING_SNAKE_CASE_ : Dict = pool_q_only
super().__init__(**_UpperCAmelCase )
@property
def snake_case ( self ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def snake_case ( self ,snake_case__ ):
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )
@property
def snake_case ( self ):
return len(self.block_sizes )
@num_blocks.setter
def snake_case ( self ,snake_case__ ):
raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
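# Minimal sketch (hypothetical values, class name as defined above):
#   config = lowerCAmelCase_(block_sizes=[4, 4, 4])
#   config.num_hidden_layers  # -> 12, i.e. sum(block_sizes)
#   config.num_blocks         # -> 3,  i.e. len(block_sizes)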
| 714 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Dict = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Any = ['''ChineseCLIPFeatureExtractor''']
UpperCamelCase__ : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
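# Note (illustration only): with the _LazyModule pattern above,
#   from transformers.models.chinese_clip import ChineseCLIPProcessor
# resolves the symbol on first attribute access, so the heavy torch/vision
# imports are deferred until a class from the matching block is actually used.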
| 685 | 0 |
from PIL import Image
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
    def brightness(c : int ) -> float:
return 1_28 + level + (c - 1_28)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
UpperCamelCase__ : str = change_brightness(img, 1_00)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
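# Illustrative mapping (assuming the helper receives an image and a level as intended):
# level = 100 shifts every channel value c to 128 + 100 + (c - 128) = c + 100,
# and PIL's Image.point clamps the result into the valid 0..255 range.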
| 715 |
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> Tuple:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
else:
return a * actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(lowerCamelCase_ , lowerCamelCase_ )
return actual_power(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
print(power(-2, -3))
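# Spot checks derived from the definitions above (assuming the parameters bind
# to a and b as written in the bodies):
#   power(2, 3)   -> 8
#   power(2, -3)  -> 1 / actual_power(2, 3) = 0.125
#   power(-2, -3) -> -0.125, which is what the __main__ guard prints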
| 685 | 0 |
from __future__ import annotations
import math
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> List[Any]:
"""simple docstring"""
if num <= 0:
SCREAMING_SNAKE_CASE_ : int = F'{num}: Invalid input, please enter a positive integer.'
raise ValueError(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [True] * (num + 1)
SCREAMING_SNAKE_CASE_ : List[str] = []
SCREAMING_SNAKE_CASE_ : Dict = 2
SCREAMING_SNAKE_CASE_ : str = int(math.sqrt(_lowerCamelCase ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(_lowerCamelCase )
# Set multiples of start be False
for i in range(start * start , num + 1 , _lowerCamelCase ):
if sieve[i] is True:
SCREAMING_SNAKE_CASE_ : List[Any] = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(_lowerCamelCase )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
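# Worked example (matches the algorithm above):
#   prime_sieve(10) -> [2, 3, 5, 7]
# composites are struck out only while start <= sqrt(num); the tail loop then
# collects the remaining survivors between end + 1 and num.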
| 716 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=7 ,snake_case__=True ,snake_case__=True ,snake_case__=False ,snake_case__=True ,snake_case__=99 ,snake_case__=32 ,snake_case__=5 ,snake_case__=4 ,snake_case__=37 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=16 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=3 ,snake_case__=4 ,snake_case__=None ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : List[Any] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = seq_length
SCREAMING_SNAKE_CASE_ : Tuple = is_training
SCREAMING_SNAKE_CASE_ : List[str] = use_input_mask
SCREAMING_SNAKE_CASE_ : List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = intermediate_size
SCREAMING_SNAKE_CASE_ : Dict = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : int = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[str] = num_choices
SCREAMING_SNAKE_CASE_ : Tuple = scope
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=snake_case__ ,initializer_range=self.initializer_range ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ ,attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ : List[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,use_cache=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
SCREAMING_SNAKE_CASE_ : str = ids_tensor((self.batch_size, 3) ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append to next input_ids and attention mask
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Dict = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,past_key_values=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__a : int = (LlamaForCausalLM,) if is_torch_available() else ()
__a : Any = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Tuple = False
__a : Tuple = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self ,config_class=snake_case__ ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : Optional[int] = type
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Dict = 3
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : str = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Any = 3
SCREAMING_SNAKE_CASE_ : int = 'single_label_classification'
SCREAMING_SNAKE_CASE_ : str = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Dict = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Tuple = 3
SCREAMING_SNAKE_CASE_ : str = 'multi_label_classification'
SCREAMING_SNAKE_CASE_ : int = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Tuple = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def snake_case ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([1, 10] ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE_ : int = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : List[Any] = original_model(snake_case__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : List[Any] = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE_ : int = LlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE_ : str = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : Optional[int] = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
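    # For reference (hedged sketch mirroring the dict built above): enabling RoPE
    # scaling on a config amounts to
    #   config.rope_scaling = {'type': 'linear', 'factor': 10.0}
    # before instantiating the model; 'dynamic' scaling only changes the outputs
    # past the original maximum sequence length, which is what the asserts check.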
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : List[str] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Dict = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : str = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : int = model(torch.tensor(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
SCREAMING_SNAKE_CASE_ : List[str] = 'Simply put, the theory of relativity states that '
SCREAMING_SNAKE_CASE_ : str = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(snake_case__ ,return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=snake_case__ )
# greedy generation outputs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.generate(snake_case__ ,max_new_tokens=64 ,top_p=snake_case__ ,temperature=1 ,do_sample=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(generated_ids[0] ,skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ ,snake_case__ )
| 685 | 0 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowerCAmelCase_ :
def __init__( self ,snake_case__ = "cpu" ,snake_case__ = "openai/clip-vit-large-patch14" ):
SCREAMING_SNAKE_CASE_ : int = device
SCREAMING_SNAKE_CASE_ : Optional[Any] = CLIPTokenizerFast.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = [0.48145466, 0.4578275, 0.40821073]
SCREAMING_SNAKE_CASE_ : Tuple = [0.26862954, 0.26130258, 0.27577711]
SCREAMING_SNAKE_CASE_ : List[Any] = torchvision.transforms.Normalize(self.image_mean ,self.image_std )
SCREAMING_SNAKE_CASE_ : int = torchvision.transforms.Resize(224 )
SCREAMING_SNAKE_CASE_ : Optional[int] = torchvision.transforms.CenterCrop(224 )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = self.resize(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Dict = self.center_crop(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Any = self.normalize(__lowerCamelCase )
return images
def __call__( self ,snake_case__=None ,snake_case__=None ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer(text=__lowerCamelCase ,**__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.preprocess_img(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : str = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowerCAmelCase_ ( nn.Module ):
def __init__( self ,snake_case__=10 ,snake_case__=0.01 ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=False ,snake_case__=True ,snake_case__="image" ,snake_case__=True ,snake_case__=False ,snake_case__=False ,snake_case__=False ,):
super().__init__()
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : str = device if device else get_device()
if vqgan:
SCREAMING_SNAKE_CASE_ : List[str] = vqgan
else:
SCREAMING_SNAKE_CASE_ : Dict = load_vqgan(self.device ,conf_path=__lowerCamelCase ,ckpt_path=__lowerCamelCase )
self.vqgan.eval()
if clip:
SCREAMING_SNAKE_CASE_ : Optional[Any] = clip
else:
SCREAMING_SNAKE_CASE_ : Dict = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ProcessorGradientFlow(device=self.device )
SCREAMING_SNAKE_CASE_ : Dict = iterations
SCREAMING_SNAKE_CASE_ : Tuple = lr
SCREAMING_SNAKE_CASE_ : Tuple = log
SCREAMING_SNAKE_CASE_ : Optional[int] = make_grid
SCREAMING_SNAKE_CASE_ : str = return_val
SCREAMING_SNAKE_CASE_ : List[Any] = quantize
SCREAMING_SNAKE_CASE_ : str = self.vqgan.decoder.z_shape
def snake_case ( self ,snake_case__=None ,snake_case__=None ,snake_case__=5 ,snake_case__=True ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
if output_path is None:
SCREAMING_SNAKE_CASE_ : List[Any] = "./animation.gif"
if input_path is None:
SCREAMING_SNAKE_CASE_ : str = self.save_path
SCREAMING_SNAKE_CASE_ : Optional[int] = sorted(glob(input_path + '/*' ) )
if not len(__lowerCamelCase ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(__lowerCamelCase ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = total_duration / len(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [frame_duration] * len(__lowerCamelCase )
if extend_frames:
SCREAMING_SNAKE_CASE_ : int = 1.5
SCREAMING_SNAKE_CASE_ : int = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(__lowerCamelCase ) )
imageio.mimsave(__lowerCamelCase ,__lowerCamelCase ,duration=__lowerCamelCase )
print(F'gif saved to {output_path}' )
def snake_case ( self ,snake_case__=None ,snake_case__=None ):
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
SCREAMING_SNAKE_CASE_ : List[str] = preprocess(Image.open(__lowerCamelCase ) ,target_image_size=256 ).to(self.device )
SCREAMING_SNAKE_CASE_ : str = preprocess_vqgan(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.vqgan.encode(__lowerCamelCase )
return z
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = self.latent.detach().requires_grad_()
SCREAMING_SNAKE_CASE_ : List[str] = base_latent + transform_vector
if self.quantize:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.vqgan.quantize(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE_ : str = trans_latent
return self.vqgan.decode(__lowerCamelCase )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__=None ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.clip_preprocessor(text=__lowerCamelCase ,images=__lowerCamelCase ,return_tensors='pt' ,padding=__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : int = self.clip(**__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = clip_outputs.logits_per_image
if weights is not None:
SCREAMING_SNAKE_CASE_ : Any = similarity_logits * weights
return similarity_logits.sum()
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = self._get_clip_similarity(pos_prompts['prompts'] ,__lowerCamelCase ,weights=(1 / pos_prompts['weights']) )
if neg_prompts:
SCREAMING_SNAKE_CASE_ : List[Any] = self._get_clip_similarity(neg_prompts['prompts'] ,__lowerCamelCase ,weights=neg_prompts['weights'] )
else:
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([1] ,device=self.device )
SCREAMING_SNAKE_CASE_ : List[Any] = -torch.log(__lowerCamelCase ) + torch.log(__lowerCamelCase )
return loss
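    # Sketch of the objective above: the optimizer minimizes -log(pos) + log(neg),
    # i.e. it pushes the image toward the positive prompts and away from the
    # negatives; with no negatives the second term degenerates to log(1) = 0.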
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = torch.randn_like(self.latent ,requires_grad=__lowerCamelCase ,device=self.device )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.optim.Adam([vector] ,lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
SCREAMING_SNAKE_CASE_ : List[str] = self._add_vector(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = loop_post_process(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Any = self._get_CLIP_loss(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
print('CLIP loss' ,__lowerCamelCase )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=__lowerCamelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
wandb.init(reinit=__lowerCamelCase ,project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
SCREAMING_SNAKE_CASE_ : List[str] = Image.open(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = image.resize((256, 256) )
wandb.log('Original Image' ,wandb.Image(__lowerCamelCase ) )
def snake_case ( self ,snake_case__ ):
if not prompts:
return []
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
SCREAMING_SNAKE_CASE_ : int = []
if isinstance(__lowerCamelCase ,__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ : int = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(__lowerCamelCase ,(tuple, list) ):
SCREAMING_SNAKE_CASE_ : List[Any] = prompt[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
SCREAMING_SNAKE_CASE_ : Optional[Any] = prompt.split(':' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = float(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE_ : Tuple = prompt
SCREAMING_SNAKE_CASE_ : Optional[int] = 1.0
processed_prompts.append(__lowerCamelCase )
weights.append(__lowerCamelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__lowerCamelCase ,device=self.device ),
}
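    # Prompt-format illustration (hypothetical inputs, matching the parsing above):
    #   'a portrait:2 | oil painting' -> prompts ['a portrait', 'oil painting'],
    #                                    weights tensor([2.0, 1.0])
    #   [('sunset', 0.5)]             -> prompts ['sunset'], weights tensor([0.5])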
def snake_case ( self ,snake_case__ ,snake_case__=None ,snake_case__=None ,snake_case__=True ,snake_case__=False ,snake_case__=True ,snake_case__=True ,snake_case__=None ,):
if image_path:
SCREAMING_SNAKE_CASE_ : str = self._get_latent(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = torch.randn(self.latent_dim ,device=self.device )
if self.log:
self._init_logging(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
assert pos_prompts, "You must provide at least one positive prompt."
SCREAMING_SNAKE_CASE_ : List[str] = self.process_prompts(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Dict = self.process_prompts(__lowerCamelCase )
if save_final and save_path is None:
SCREAMING_SNAKE_CASE_ : Any = os.path.join('./outputs/' ,'_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(__lowerCamelCase ):
os.makedirs(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = save_path + "_" + get_timestamp()
os.makedirs(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Dict = save_path
SCREAMING_SNAKE_CASE_ : List[str] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = loop_post_process(__lowerCamelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) ):
if show_intermediate:
show_pil(__lowerCamelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path ,F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'Image': wandb.Image(__lowerCamelCase )} )
if show_final:
show_pil(__lowerCamelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path ,F'iter_{iter:03d}_final.png' ) )
| 717 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
UpperCamelCase__ : int = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
UpperCamelCase__ : str = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Tuple ) -> List[str]:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Dict = collections.OrderedDict()
SCREAMING_SNAKE_CASE_ : Dict = collections.OrderedDict()
SCREAMING_SNAKE_CASE_ : List[Any] = collections.OrderedDict()
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Any = f.readlines()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = b
SCREAMING_SNAKE_CASE_ : Dict = idx
for wd in b:
SCREAMING_SNAKE_CASE_ : Any = idx
return vocab, raw_vocab, ids_to_tokens, emoji
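# Vocab-file format illustration (hypothetical rows, matching the parser above):
#   こんにちは          -> a single surface form mapped to one id
#   ウイルス,ウィルス    -> comma-separated variants sharing the same id
#   ,                  -> the literal comma token, kept as one entry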
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : List[str] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__="<|endoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__="<|startoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__=False ,**snake_case__ ,):
super().__init__(
unk_token=snake_case__ ,pad_token=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,do_clean_text=snake_case__ ,**snake_case__ ,)
if not os.path.isfile(snake_case__ ):
raise ValueError(
F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(snake_case__ ):
raise ValueError(
F'Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
SCREAMING_SNAKE_CASE_ : str = do_clean_text
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = load_vocab_and_emoji(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = SubWordJapaneseTokenizer(
vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
@property
def snake_case ( self ):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large vocabulary
return len(self.raw_vocab )
def snake_case ( self ):
return dict(self.raw_vocab ,**self.added_tokens_encoder )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.tokenize(snake_case__ ,clean=self.do_clean_text )
def snake_case ( self ,snake_case__ ):
return self.vocab.get(snake_case__ ,self.vocab.get(self.unk_token ) )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.convert_id_to_token(snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = ''.join(snake_case__ ).strip()
return out_string
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case__ ,add_special_tokens=snake_case__ ) + [self.eos_token_id] )
if len(snake_case__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE_ : List[Any] = input_ids[-self.model_max_length :]
return input_ids
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
if os.path.isdir(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
SCREAMING_SNAKE_CASE_ : Tuple = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
SCREAMING_SNAKE_CASE_ : str = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
SCREAMING_SNAKE_CASE_ : Dict = token_index
writer.write(','.join(snake_case__ ) + '\n' )
index += 1
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
json.dump(self.emoji ,snake_case__ )
return vocab_file, emoji_file
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = vocab # same as swe
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_to_tokens # same as bpe
SCREAMING_SNAKE_CASE_ : Dict = emoji
SCREAMING_SNAKE_CASE_ : int = np.max([len(snake_case__ ) for w in self.vocab.keys()] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
SCREAMING_SNAKE_CASE_ : str = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
SCREAMING_SNAKE_CASE_ : str = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
SCREAMING_SNAKE_CASE_ : str = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
SCREAMING_SNAKE_CASE_ : int = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
SCREAMING_SNAKE_CASE_ : Tuple = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ):
return len(self.ids_to_tokens )
    def clean_text( self ,content ):
        content = self.content_repatter1.sub('<URL>' ,content )
        content = self.content_repatter2.sub('<EMAIL>' ,content )
        content = self.content_repatter3.sub('<TEL>' ,content )
        content = self.content_repatter4.sub('<DATE>' ,content )
        content = self.content_repatter5.sub('<DATE>' ,content )
        content = self.content_repatter6.sub('<PRICE>' ,content )
        content = content.translate(self.content_transa )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>' ,'<BLOCK>' )
        return content
    def tokenize( self ,text ,clean=False ):
        text = text.replace(' ' ,'<SP>' )
        text = text.replace('\u3000' ,'<SP>' )  # full-width space
        text = text.replace('\r\n' ,'<BR>' )
        text = text.replace('\n' ,'<BR>' )
        text = text.replace('\r' ,'<BR>' )
        text = text.replace('\t' ,'<TAB>' )
        text = text.replace('—' ,'ー' )
        text = text.replace('−' ,'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k ,v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:  # one character encoded as two UTF-8 bytes
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0XC2A1 and c <= 0XC2BF)
                    or (c >= 0XC780 and c <= 0XC783)
                    or (c >= 0XCAB9 and c <= 0XCBBF)
                    or (c >= 0XCC80 and c <= 0XCDA2)
                ):
                    return True
            return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:  # one character encoded as three UTF-8 bytes
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0XE2_8080 and c <= 0XE2_B07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) ,pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end ,pos ,-1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates ,key=lambda x: x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('<KIGOU>' )
                elif checkuae(wd ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                pos = end
        return result
    def convert_id_to_token( self ,index ,breakline="\n" ):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode('utf-8' ,errors='replace' ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word] )
            elif word == "<SP>":
                words.append(' ' )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append('\t' )
            elif word == "<BLOCK>":
                words.append('▀' )
            elif word == "<KIGOU>":
                words.append('ǀ' )
            elif word == "<U2000U2BFF>":
                words.append('‖' )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode('utf-8' ,errors='replace' ) )
        text = ''.join(words )
        return text
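# Hypothetical mini-demo of the tokenizer class above, using a made-up
# three-entry vocabulary (token -> id), its inverse table, and an empty emoji
# map in place of the files that normally ship with a pretrained checkpoint.
toy_vocab = {'こん': 0, 'にち': 1, 'は': 2}
toy_ids_to_tokens = {0: ['こん'], 1: ['にち'], 2: ['は']}
toy_emoji = {'emoji': {}, 'emoji_inv': {}}
toy_tokenizer = lowerCAmelCase_(toy_vocab ,toy_ids_to_tokens ,toy_emoji )
print(toy_tokenizer.tokenize('こんにちは' ) )  # ['こん', 'にち', 'は']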
| 685 | 0 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
UpperCamelCase__ : Dict = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
UpperCamelCase__ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
UpperCamelCase__ : Optional[int] = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
UpperCamelCase__ : Tuple = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] ,reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] ,)
    def _download_and_prepare( self ,dl_manager ):
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
    def _compute( self ,predictions ,references ,alpha=0.9 ,beta=3 ,gamma=0.5 ):
        if NLTK_VERSION >= version.Version('3.6.5' ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) ,word_tokenize(pred ) ,alpha=alpha ,beta=beta ,gamma=gamma )
                for ref, pred in zip(references ,predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref ,pred ,alpha=alpha ,beta=beta ,gamma=gamma )
                for ref, pred in zip(references ,predictions )
            ]
        return {"meteor": np.mean(scores )}
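if __name__ == "__main__":
    # Minimal usage sketch, mirroring the docstring example above (requires
    # `nltk` so the metric can fetch its wordnet data on first use).
    import datasets

    meteor = datasets.load_metric('meteor' )
    predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
    references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
    print(round(meteor.compute(predictions=predictions ,references=references )["meteor"] ,4 ) )  # 0.6944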
| 718 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ):
"""simple docstring"""
    in_colab = False
    in_kaggle = False
    if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            F'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
    if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , None ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function , distributed_type='TPU' )
        print(F'Launching a training on {num_processes} TPU cores.' )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
        function(*args )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes , master_addr='127.0.0.1' , master_port=use_port , mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function , distributed_type='MULTI_GPU' )
                print(F'Launching training on {num_processes} GPUs.' )
                try:
                    start_processes(launcher , args=args , nprocs=num_processes , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
        if is_mps_available():
            os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'  # assumed env toggle enabling the MPS CPU fallback
            print('Launching training on MPS.' )
        elif torch.cuda.is_available():
            print('Launching training on one GPU.' )
        else:
            print('Launching training on CPU.' )
        function(*args )
def __UpperCAmelCase ( function , args=() , num_processes=2 ):
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method='fork' )
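if __name__ == "__main__":
    # Hypothetical usage sketch: the training function must build its own
    # Accelerator, and no CUDA call may run before the workers are forked.
    from accelerate import Accelerator, notebook_launcher

    def training_loop(mixed_precision="no" ):
        accelerator = Accelerator(mixed_precision=mixed_precision )
        accelerator.print(F'process {accelerator.process_index} of {accelerator.num_processes}' )

    notebook_launcher(training_loop , args=('no',) , num_processes=2 )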
| 685 | 0 |
import numpy as np
UpperCamelCase__ : Union[str, Any] = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class lowerCAmelCase_ :
    def __init__( self ):
        self.SQUARE = np.array(UpperCamelCase__ )  # the 5x5 Polybius square defined above
    def letter_to_numbers( self ,letter ):
        """Return the 1-based (row, column) coordinates of `letter` in the square."""
        index1, index2 = np.where(letter == self.SQUARE )
        indexes = np.concatenate([index1 + 1, index2 + 1] )
        return indexes
    def numbers_to_letter( self ,index1 ,index2 ):
        """Return the letter stored at 1-based (row, column) coordinates."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter
    def encode( self ,message ):
        """Encipher `message` with the Bifid cipher."""
        message = message.lower()
        message = message.replace(' ' ,'' )
        message = message.replace('j' ,'i' )
        first_step = np.empty((2, len(message )) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message ) )
        encoded_message = ''
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[numbers_index * 2] )
            index2 = int(second_step[(numbers_index * 2) + 1] )
            letter = self.numbers_to_letter(index1 ,index2 )
            encoded_message = encoded_message + letter
        return encoded_message
    def decode( self ,message ):
        """Decipher a Bifid-encoded `message`."""
        message = message.lower()
        message = message.replace(' ' ,'' )
        first_step = np.empty(2 * len(message ) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[letter_index] = numbers[0]
            first_step[letter_index + len(message )] = numbers[1]
        second_step = first_step.reshape((2, len(message )) )
        decoded_message = ''
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[0, numbers_index] )
            index2 = int(second_step[1, numbers_index] )
            letter = self.numbers_to_letter(index1 ,index2 )
            decoded_message = decoded_message + letter
        return decoded_message
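# Usage sketch for the cipher class above (instantiated via its obfuscated
# name); 'j' is folded into 'i' by the 5x5 square, so avoid it in inputs.
cipher = lowerCAmelCase_()
secret = cipher.encode('testmessage' )
print(secret )                  # the enciphered text
print(cipher.decode(secret ) )  # 'testmessage'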
| 719 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase__ : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 685 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( density : float , bulk_modulus : float ) -> float:
    """Speed of sound in a fluid: c = sqrt(bulk_modulus / density)."""
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
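    # Worked example (water at ~20 °C): density ≈ 998 kg/m^3 and bulk modulus
    # ≈ 2.15e9 Pa give c = sqrt(2.15e9 / 998) ≈ 1467.7 m/s.
    print(__UpperCAmelCase(density=998 ,bulk_modulus=2.15E9 ) )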
| 720 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
__a : Union[str, Any] = CLIPTokenizer
__a : List[str] = CLIPTokenizerFast
__a : List[str] = True
__a : Tuple = {}
__a : Tuple = False
    def setUp( self ):
        super().setUp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self ,**kwargs ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_rust_tokenizer( self ,**kwargs ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts( self ,tokenizer ):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
def snake_case ( self ):
        tokenizer = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                text = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                tokens_s = tokenizer_s.tokenize(text )
                tokens_r = tokenizer_r.tokenize(text )
                self.assertListEqual(tokens_s ,tokens_r )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'
                tokens_s = tokenizer_s.tokenize(text )
                tokens_r = tokenizer_r.tokenize(text )
                self.assertListEqual(tokens_s ,tokens_r )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
                for unicode_seq in spaces_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq )
                    tokens_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(tokens_s ,tokens_r )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq )
                    tokens_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(tokens_s ,tokens_r )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name ,use_fast=True ,)
                encoding = tokenizer_r(text ,return_offsets_mapping=True ,add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] ,(0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,(len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) ,)
                text = F' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name ,use_fast=True ,)
                encoding = tokenizer_r(text ,return_offsets_mapping=True ,add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,(1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
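if __name__ == "__main__":
    # Hypothetical usage sketch for the tokenizer under test, with the public
    # openai/clip-vit-base-patch32 checkpoint (network access required):
    clip_tok = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
    ids = clip_tok('a photo of a cat' ).input_ids
    print(clip_tok.convert_ids_to_tokens(ids ) )
    # ['<|startoftext|>', 'a</w>', 'photo</w>', 'of</w>', 'a</w>', 'cat</w>', '<|endoftext|>']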
| 685 | 0 |
def check_bipartite_dfs(graph ) -> bool:
    """Return True if the undirected graph (given as an adjacency list) is 2-colorable."""
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs(v , c ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
UpperCamelCase__ : Optional[Any] = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(UpperCamelCase__))
| 721 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """Entry point for the `transformers-cli` command."""
    parser = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers' )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 685 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester :
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=99 ,hidden_size=24 ,num_hidden_layers=2 ,num_attention_heads=6 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,scope=None ,range_bbox=1000 ,):
SCREAMING_SNAKE_CASE_ : Tuple = parent
SCREAMING_SNAKE_CASE_ : str = batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = seq_length
SCREAMING_SNAKE_CASE_ : List[Any] = is_training
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_input_mask
SCREAMING_SNAKE_CASE_ : List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : List[Any] = use_labels
SCREAMING_SNAKE_CASE_ : str = vocab_size
SCREAMING_SNAKE_CASE_ : str = hidden_size
SCREAMING_SNAKE_CASE_ : int = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE_ : str = hidden_act
SCREAMING_SNAKE_CASE_ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : int = scope
SCREAMING_SNAKE_CASE_ : Union[str, Any] = range_bbox
    def prepare_config_and_inputs( self ):
SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE_ : Dict = bbox[i, j, 3]
SCREAMING_SNAKE_CASE_ : int = bbox[i, j, 1]
SCREAMING_SNAKE_CASE_ : Optional[int] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE_ : List[str] = bbox[i, j, 2]
SCREAMING_SNAKE_CASE_ : List[Any] = bbox[i, j, 0]
SCREAMING_SNAKE_CASE_ : Tuple = t
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
SCREAMING_SNAKE_CASE_ : Tuple = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : str = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config( self ):
return LiltConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
    def create_and_check_model( self ,config ,input_ids ,bbox ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,):
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,bbox=bbox ,attention_mask=input_mask ,token_type_ids=token_type_ids )
        result = model(input_ids ,bbox=bbox ,token_type_ids=token_type_ids )
        result = model(input_ids ,bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification( self ,config ,input_ids ,bbox ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,bbox=bbox ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self ,config ,input_ids ,bbox ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,):
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,bbox=bbox ,attention_mask=input_mask ,token_type_ids=token_type_ids ,start_positions=sequence_labels ,end_positions=sequence_labels ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__a : Union[str, Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__a : str = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Optional[Any] = False
__a : List[str] = False
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
return True
    def setUp( self ):
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=LiltConfig ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : Optional[int] = type
self.model_tester.create_and_check_model(*A_ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
@slow
def snake_case ( self ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = LiltModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] ,device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] ,device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids ,bbox=bbox )
        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] ,device=torch_device ,)
        self.assertEqual(outputs.last_hidden_state.shape ,expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] ,expected_slice ,atol=1E-3 ) )
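if __name__ == "__main__" and is_torch_available():
    # Hypothetical usage sketch: run the pretrained LiLT encoder on a tiny
    # batch of token ids plus (x0, y0, x1, y1) layout boxes, as in the
    # integration test above (network access required).
    model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' )
    input_ids = torch.tensor([[1, 2]] )
    bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] )
    with torch.no_grad():
        hidden = model(input_ids=input_ids ,bbox=bbox ).last_hidden_state
    print(hidden.shape )  # torch.Size([1, 2, 768])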
| 700 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : Dict = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def get_diffusers_versions():
    """Return all released diffusers versions, sorted ascending."""
    url = 'https://pypi.org/pypi/diffusers/json'
    releases = json.loads(request.urlopen(url ).read() )['releases'].keys()
    return sorted(releases , key=lambda x : version.Version(x ) )
def init_hf_modules():
    """Create the dynamic-modules cache and put it on sys.path."""
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE , exist_ok=True )
    init_path = Path(HF_MODULES_CACHE ) / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module( name : Union[str, os.PathLike] ):
    """Create a package named `name` inside the dynamic-modules cache."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path , exist_ok=True )
    init_path = dynamic_module_path / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def get_relative_imports( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(R'^\s*import\s+\.(\S+)\s*$' , content , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(R'^\s*from\s+\.(\S+)\s+import' , content , flags=re.MULTILINE )
    # Unique-ify
    return list(set(relative_imports ) )
def get_relative_import_files( module_file ):
    """Recursively collect the files reached through relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f ) )
        module_path = Path(module_file ).parent
        new_import_files = [str(module_path / m ) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [F'{f}.py' for f in new_import_files]
        no_change = len(new_import_files ) == 0
        all_relative_imports.extend(files_to_check )
    return all_relative_imports
def check_imports( lowerCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(R'^\s*import\s+(\S+)\s*$' , content , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(R'^\s*from\s+(\S+)\s+import' , content , flags=re.MULTILINE )
    # Only keep the top-level module
    imports = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
    # Unique-ify and test we got them all
    imports = list(set(imports ) )
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
    if len(missing_packages ) > 0:
        raise ImportError(
            'This modeling file requires the following packages that were not found in your environment: '
            F'{", ".join(missing_packages )}. Run `pip install {" ".join(missing_packages )}`' )
    return get_relative_imports(lowerCamelCase_ )
def get_class_in_module( class_name , module_path ):
    """Import `module_path` and return the class named `class_name` in it."""
    module_path = module_path.replace(os.path.sep , '.' )
    module = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module , class_name )
def find_pipeline_class( loaded_module ):
    """Return the single DiffusionPipeline subclass defined in `loaded_module`."""
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module , inspect.isclass ) )
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , DiffusionPipeline )
            and cls.__module__.split('.' )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
                    F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
                    F' {loaded_module}.' )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file( pretrained_model_name_or_path : Union[str, os.PathLike] , module_file : str , cache_dir : Optional[Union[str, os.PathLike]] = None , force_download : bool = False , resume_download : bool = False , proxies : Optional[Dict[str, str]] = None , use_auth_token : Optional[Union[bool, str]] = None , revision : Optional[str] = None , local_files_only : bool = False , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = module_file_or_url
SCREAMING_SNAKE_CASE_ : Dict = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
SCREAMING_SNAKE_CASE_ : List[str] = get_diffusers_versions()
# cut ".dev0"
SCREAMING_SNAKE_CASE_ : Dict = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
SCREAMING_SNAKE_CASE_ : List[Any] = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
SCREAMING_SNAKE_CASE_ : int = F'v{revision}'
elif revision == "main":
SCREAMING_SNAKE_CASE_ : List[Any] = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
SCREAMING_SNAKE_CASE_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase_ , pipeline=lowerCamelCase_ )
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = cached_download(
lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Tuple = 'git'
SCREAMING_SNAKE_CASE_ : Dict = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE_ : List[str] = hf_hub_download(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
SCREAMING_SNAKE_CASE_ : Dict = check_imports(lowerCamelCase_ )
# Now we move the module inside our cached dynamic modules.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(lowerCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F'{module_needed}.py'
shutil.copy(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Tuple = use_auth_token
elif use_auth_token is True:
SCREAMING_SNAKE_CASE_ : int = HfFolder.get_token()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = model_info(lowerCamelCase_ , revision=lowerCamelCase_ , token=lowerCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
SCREAMING_SNAKE_CASE_ : Any = submodule_path / commit_hash
SCREAMING_SNAKE_CASE_ : List[Any] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase_ , F'{module_needed}.py' , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return os.path.join(lowerCamelCase_ , lowerCamelCase_ )
def get_class_from_dynamic_module( pretrained_model_name_or_path : Union[str, os.PathLike] , module_file : str , class_name : Optional[str] = None , cache_dir : Optional[Union[str, os.PathLike]] = None , force_download : bool = False , resume_download : bool = False , proxies : Optional[Dict[str, str]] = None , use_auth_token : Optional[Union[bool, str]] = None , revision : Optional[str] = None , local_files_only : bool = False , **kwargs , ) -> str:
    """simple docstring"""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return get_class_in_module(class_name , final_module.replace('.py' , '' ) )
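if __name__ == "__main__":
    # Hypothetical usage sketch: resolve a community pipeline class from the
    # diffusers GitHub repo by file name ("clip_guided_stable_diffusion" is
    # one published community pipeline; network access required).
    pipeline_cls = get_class_from_dynamic_module(
        'clip_guided_stable_diffusion' , 'clip_guided_stable_diffusion.py' )
    print(pipeline_cls.__name__ )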
| 685 | 0 |
import os
import time
import numpy as np
import onnxruntime as ort
UpperCamelCase__ : Union[str, Any] = '''1'''
UpperCamelCase__ : List[Any] = '''0'''
UpperCamelCase__ : str = '''1'''
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
execution_provider = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
sess = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 1_28
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
start_time = time.time()
max_iters = 20_00
pred = {}
for iter in range(max_iters):
    pred = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 10_00 / max_iters))
| 701 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCAmelCase_ ( PretrainedConfig ):
__a : Optional[int] = "visual_bert"
    def __init__( self ,vocab_size=30522 ,hidden_size=768 ,visual_embedding_dim=512 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1E-12 ,bypass_transformer=False ,special_visual_initialize=True ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = visual_embedding_dim
SCREAMING_SNAKE_CASE_ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : int = bypass_transformer
SCREAMING_SNAKE_CASE_ : Optional[Any] = special_visual_initialize
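if __name__ == "__main__":
    # Usage sketch: instantiate the configuration above (obfuscated class name
    # kept) and override one visual parameter; the token ids come from the
    # signature defaults via the parent config.
    config = lowerCAmelCase_(visual_embedding_dim=1024 )
    print(config.pad_token_id ,config.bos_token_id ,config.eos_token_id )  # 1 0 2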
| 685 | 0 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k ):
    """Defer an `obj[k]` lookup."""
    return getitem, k
def _set(k , v ):
    """Defer an `obj[k] = v` assignment."""
    return setitem, k, v
def _del(k ):
    """Defer a `del obj[k]`."""
    return delitem, k
def _run_operation(obj , fun , *args ):
    """Apply a deferred operation to `obj`, capturing any exception."""
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
UpperCamelCase__ : Any = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
UpperCamelCase__ : Any = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
UpperCamelCase__ : Union[str, Any] = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
UpperCamelCase__ : Union[str, Any] = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
UpperCamelCase__ : Optional[int] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
UpperCamelCase__ : str = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def test_hash_map_is_the_same_as_dict(operations ):
    """Replay the same operations against HashMap and a plain dict and compare."""
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res, my_exc = _run_operation(my , fun , *args )
        py_res, py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my ) == str(py )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )
def test_no_new_methods():
    """The public API of HashMap should not exceed that of dict."""
    def is_public(name: str ) -> bool:
        return not name.startswith('_' )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
assert dict_public_names > hash_public_names
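if __name__ == "__main__":
    # Usage sketch outside pytest: the HashMap mirrors dict semantics.
    hm = HashMap(initial_block_size=4 )
    hm['key_a'] = 'val_a'
    assert hm['key_a'] == 'val_a'
    del hm['key_a']
    assert len(hm ) == 0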
| 702 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int ) -> None:
    """Estimate pi by sampling points uniformly in the square [-1, 1] x [-1, 1]."""
    def is_in_circle(x: float , y: float ) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F'The estimated value of pi is {pi_estimate}' )
    print(F'The numpy value of pi is {pi}' )
    print(F'The total error is {abs(pi - pi_estimate )}' )
def area_under_curve_estimator(iterations: int , function_to_integrate: Callable[[float], float] , min_value: float = 0.0 , max_value: float = 1.0 , ) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` on [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int , min_value: float = 0.0 , max_value: float = 1.0 ) -> None:
    """Check the estimator against the exact area under y = x."""
    def identity_function(x: float ) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************' )
    print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
    print(F'Estimated value is {estimated_value}' )
    print(F'Expected value is {expected_value}' )
    print(F'Total error is {abs(estimated_value - expected_value )}' )
    print('******************' )
def pi_estimator_using_area_under_curve(iterations: int ) -> None:
    """Estimate pi as the area under a quarter circle of radius 2."""
    def function_to_integrate(x: float ) -> float:
        return sqrt(4.0 - x * x )
    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
    print('******************' )
    print('Estimating pi using area_under_curve_estimator' )
    print(F'Estimated value is {estimated_value}' )
    print(F'Expected value is {pi}' )
    print(F'Total error is {abs(estimated_value - pi )}' )
    print('******************' )
if __name__ == "__main__":
import doctest
doctest.testmod()
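    # Quick demo of the estimators above (sample sizes are illustrative):
    pi_estimator(100_000 )
    area_under_line_estimator_check(100_000 )
    pi_estimator_using_area_under_curve(100_000 )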
| 685 | 0 |
'''simple docstring'''
def binary_multiply(a : int , b : int ) -> int:
    """Multiply a and b by adding shifted copies of a for each set bit of b."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply(a : int , b : int , c : int ) -> int:
    """Same double-and-add scheme, reducing modulo c at every step."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
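# Sanity checks for the two routines above:
assert binary_multiply(3 , 4 ) == 12
assert binary_mod_multiply(3 , 4 , 5 ) == (3 * 4) % 5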
| 703 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=7 ,snake_case__=3 ,snake_case__=18 ,snake_case__=30 ,snake_case__=400 ,snake_case__=True ,snake_case__=None ,snake_case__=True ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE_ : int = max_resolution
SCREAMING_SNAKE_CASE_ : Dict = do_resize
SCREAMING_SNAKE_CASE_ : Dict = size
SCREAMING_SNAKE_CASE_ : str = apply_ocr
def snake_case ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
__a : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = LayoutLMvaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
self.assertTrue(hasattr(snake_case__ ,'apply_ocr' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,snake_case__ )
self.assertIsInstance(encoding.boxes ,snake_case__ )
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# with apply_OCR = True
SCREAMING_SNAKE_CASE_ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
SCREAMING_SNAKE_CASE_ : str = Image.open(ds[0]['file'] ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : Any = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
# with apply_OCR = False
SCREAMING_SNAKE_CASE_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
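# Hedged usage sketch for the processor exercised by the tests above. It
# assumes the scrambled "LayoutLMvaImageProcessor" corresponds to the public
# LayoutLMv3ImageProcessor, that Pillow and a Tesseract install are
# available, and that "document.png" is a hypothetical input file.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutLMv3ImageProcessor

    processor = LayoutLMv3ImageProcessor(apply_ocr=True)
    encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
    print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
    print(encoding.words, encoding.boxes)  # OCR words and their bounding boxes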
| 685 | 0 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=7 ,snake_case__=True ,snake_case__=True ,snake_case__=True ,snake_case__=True ,snake_case__=99 ,snake_case__=[1, 1, 2] ,snake_case__=1 ,snake_case__=32 ,snake_case__=4 ,snake_case__=8 ,snake_case__=37 ,snake_case__="gelu_new" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=0.0 ,snake_case__=512 ,snake_case__=3 ,snake_case__=0.02 ,snake_case__=3 ,snake_case__=4 ,snake_case__=None ,snake_case__=False ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = parent
SCREAMING_SNAKE_CASE_ : Tuple = batch_size
SCREAMING_SNAKE_CASE_ : int = seq_length
SCREAMING_SNAKE_CASE_ : List[Any] = is_training
SCREAMING_SNAKE_CASE_ : List[str] = use_input_mask
SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : Dict = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = block_sizes
SCREAMING_SNAKE_CASE_ : int = num_decoder_layers
SCREAMING_SNAKE_CASE_ : Optional[Any] = d_model
SCREAMING_SNAKE_CASE_ : Union[str, Any] = n_head
SCREAMING_SNAKE_CASE_ : Any = d_head
SCREAMING_SNAKE_CASE_ : int = d_inner
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : int = hidden_dropout
SCREAMING_SNAKE_CASE_ : Dict = attention_dropout
SCREAMING_SNAKE_CASE_ : Union[str, Any] = activation_dropout
SCREAMING_SNAKE_CASE_ : int = max_position_embeddings
SCREAMING_SNAKE_CASE_ : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = num_labels
SCREAMING_SNAKE_CASE_ : Any = num_choices
SCREAMING_SNAKE_CASE_ : Dict = scope
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_std
# Used in the tests to check the size of the first attention layer
SCREAMING_SNAKE_CASE_ : Any = n_head
# Used in the tests to check the size of the first hidden state
SCREAMING_SNAKE_CASE_ : Tuple = self.d_model
# Used in the tests to check the number of output hidden states/attentions
SCREAMING_SNAKE_CASE_ : Optional[Any] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.num_hidden_layers + 2
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Tuple = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE_ : int = FunnelConfig(
vocab_size=self.vocab_size ,block_sizes=self.block_sizes ,num_decoder_layers=self.num_decoder_layers ,d_model=self.d_model ,n_head=self.n_head ,d_head=self.d_head ,d_inner=self.d_inner ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,activation_dropout=self.activation_dropout ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_std=self.initializer_std ,)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = TFFunnelModel(config=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : Optional[int] = model(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Tuple = TFFunnelModel(config=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Dict = TFFunnelModel(config=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : str = TFFunnelBaseModel(config=snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : List[str] = TFFunnelBaseModel(config=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 3, self.d_model) )
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFFunnelBaseModel(config=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = TFFunnelForPreTraining(config=snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : str = TFFunnelForMaskedLM(config=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFFunnelForSequenceClassification(config=snake_case__ )
SCREAMING_SNAKE_CASE_ : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : int = model(snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Any = self.num_choices
SCREAMING_SNAKE_CASE_ : int = TFFunnelForMultipleChoice(config=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tf.tile(tf.expand_dims(snake_case__ ,1 ) ,(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : List[str] = tf.tile(tf.expand_dims(snake_case__ ,1 ) ,(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.tile(tf.expand_dims(snake_case__ ,1 ) ,(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : str = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Tuple = TFFunnelForTokenClassification(config=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : List[Any] = TFFunnelForQuestionAnswering(config=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : int = model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE_ : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__a : str = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
__a : Union[str, Any] = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
__a : int = False
__a : Optional[Any] = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = TFFunnelModelTester(self )
SCREAMING_SNAKE_CASE_ : Any = ConfigTester(self ,config_class=snake_case__ )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
@require_tf
class lowerCAmelCase_ ( TFModelTesterMixin , unittest.TestCase ):
__a : Union[str, Any] = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
__a : Tuple = False
__a : int = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = TFFunnelModelTester(self ,base=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self ,config_class=snake_case__ )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
| 704 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ : str = logging.getLogger(__name__)
@dataclass(frozen=True )
class lowerCAmelCase_ :
__a : str
__a : str
__a : Optional[str] = None
__a : Optional[str] = None
__a : Optional[str] = None
@dataclass(frozen=True )
class lowerCAmelCase_ :
__a : List[int]
__a : Optional[List[int]] = None
__a : Optional[List[int]] = None
__a : Optional[Union[int, float]] = None
__a : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCAmelCase_ ( Dataset ):
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
snake_case__ ,'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(snake_case__ ) ,snake_case__ ,) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : Dict = cached_features_file + '.lock'
with FileLock(snake_case__ ):
if os.path.exists(snake_case__ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(snake_case__ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
SCREAMING_SNAKE_CASE_ : List[Any] = (
processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
)
logger.info('Training examples: %s' ,len(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
logger.info('Saving features into cached file %s' ,snake_case__ )
torch.save(self.features ,snake_case__ )
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ :
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 128 ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list
SCREAMING_SNAKE_CASE_ : int = processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(snake_case__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
SCREAMING_SNAKE_CASE_ : List[Any] = tf.data.Dataset.from_generator(
snake_case__ ,(
{
                'example_id': tf.int32,
                'input_ids': tf.int32,
                'attention_mask': tf.int32,
                'token_type_ids': tf.int32,
            },
            tf.int64,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
class lowerCAmelCase_ ( DataProcessor ):
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_train_set.txt' ) ) ,'train' )
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
def snake_case ( self ):
return ["contradiction", "entailment", "neutral"]
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i, line in enumerate(snake_case__ ):
if i == 0:
continue
SCREAMING_SNAKE_CASE_ : List[str] = '%s-%s' % (set_type, line[0])
SCREAMING_SNAKE_CASE_ : Dict = line[5]
SCREAMING_SNAKE_CASE_ : Dict = line[6]
SCREAMING_SNAKE_CASE_ : Tuple = line[7][2:] if line[7].startswith('ex' ) else line[7]
SCREAMING_SNAKE_CASE_ : Optional[int] = line[0]
examples.append(InputExample(guid=snake_case__ ,text_a=snake_case__ ,text_b=snake_case__ ,label=snake_case__ ,pairID=snake_case__ ) )
return examples
def __UpperCAmelCase ( lowerCamelCase_ : List[InputExample] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : PreTrainedTokenizer , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase_ )}
SCREAMING_SNAKE_CASE_ : Dict = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase_ ) , desc='convert examples to features' ):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d' % (ex_index) )
SCREAMING_SNAKE_CASE_ : Any = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' , truncation=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = label_map[example.label] if example.label in label_map else 0
SCREAMING_SNAKE_CASE_ : List[str] = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase_ , label=lowerCamelCase_ , pairID=lowerCamelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
UpperCamelCase__ : str = {
'''hans''': 3,
}
UpperCamelCase__ : Dict = {
'''hans''': HansProcessor,
}
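# Hedged usage sketch for the pipeline above. It assumes the scrambled
# class/function names resolve to the HansProcessor /
# hans_convert_examples_to_features API referenced in the dicts above, and
# "path/to/hans" is a hypothetical directory holding the HANS tsv files.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    processor = HansProcessor()
    examples = processor.get_dev_examples("path/to/hans")
    features = hans_convert_examples_to_features(
        examples, processor.get_labels(), 128, tokenizer
    )
    print(features[0].input_ids[:10], features[0].label)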
| 685 | 0 |
import operator as op
UpperCamelCase__ : List[str] = '''scaler.pt'''
UpperCamelCase__ : Union[str, Any] = '''pytorch_model'''
UpperCamelCase__ : Optional[Any] = '''random_states'''
UpperCamelCase__ : int = '''optimizer'''
UpperCamelCase__ : int = '''scheduler'''
UpperCamelCase__ : int = '''pytorch_model.bin'''
UpperCamelCase__ : Dict = '''pytorch_model.bin.index.json'''
UpperCamelCase__ : Tuple = '''model.safetensors'''
UpperCamelCase__ : Optional[Any] = '''model.safetensors.index.json'''
UpperCamelCase__ : Optional[Any] = '''1.10.2'''
UpperCamelCase__ : str = '''py38'''
UpperCamelCase__ : Union[str, Any] = '''4.17.0'''
UpperCamelCase__ : Dict = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
UpperCamelCase__ : Union[str, Any] = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
UpperCamelCase__ : Tuple = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
UpperCamelCase__ : int = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
UpperCamelCase__ : Dict = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
UpperCamelCase__ : List[Any] = '''2.0.1'''
UpperCamelCase__ : int = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
UpperCamelCase__ : str = ['''default''', '''reduce-overhead''', '''max-autotune''']
UpperCamelCase__ : Any = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCamelCase__ : List[str] = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
UpperCamelCase__ : Dict = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
UpperCamelCase__ : Optional[int] = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
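# Sketch of the intended use of the string-to-operator map above: gating
# behaviour on a package version. `compare_versions` is an illustrative
# name, not defined in this file.
import operator as _op
from packaging import version

_STR_OP = {">": _op.gt, ">=": _op.ge, "==": _op.eq, "!=": _op.ne, "<=": _op.le, "<": _op.lt}

def compare_versions(current: str, operation: str, target: str) -> bool:
    return _STR_OP[operation](version.parse(current), version.parse(target))

assert compare_versions("2.0.1", ">=", "1.10.2")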
| 705 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] ) -> int:
"""simple docstring"""
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
SCREAMING_SNAKE_CASE_ : str = dataset_size < in_memory_max_size
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = is_small_dataset(lowerCamelCase_ )
assert result == expected
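# The rule the parametrized test above pins down, restated as a sketch:
# a dataset counts as "small" only when both sizes are truthy and the
# dataset fits under the in-memory cap. The helper name is illustrative.
def _is_small_dataset_sketch(dataset_size, in_memory_max_size) -> bool:
    return bool(dataset_size and in_memory_max_size and dataset_size < in_memory_max_size)

assert _is_small_dataset_sketch(4_00 * 2**20, 9_00 * 2**20)
assert not _is_small_dataset_sketch(6_00 * 2**20, 0)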
| 685 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
UpperCamelCase__ : List[str] = {
'''nvidia/segformer-b0-finetuned-ade-512-512''': (
'''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'''
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowerCAmelCase_ ( PretrainedConfig ):
__a : Union[str, Any] = "segformer"
def __init__( self ,snake_case__=3 ,snake_case__=4 ,snake_case__=[2, 2, 2, 2] ,snake_case__=[8, 4, 2, 1] ,snake_case__=[32, 64, 160, 256] ,snake_case__=[7, 3, 3, 3] ,snake_case__=[4, 2, 2, 2] ,snake_case__=[1, 2, 5, 8] ,snake_case__=[4, 4, 4, 4] ,snake_case__="gelu" ,snake_case__=0.0 ,snake_case__=0.0 ,snake_case__=0.1 ,snake_case__=0.02 ,snake_case__=0.1 ,snake_case__=1E-6 ,snake_case__=256 ,snake_case__=255 ,**snake_case__ ,):
        super().__init__(**snake_case__ )
        if "reshape_last_stage" in snake_case__ and snake_case__["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.' ,FutureWarning ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE_ : Tuple = num_encoder_blocks
SCREAMING_SNAKE_CASE_ : Dict = depths
SCREAMING_SNAKE_CASE_ : Dict = sr_ratios
SCREAMING_SNAKE_CASE_ : Tuple = hidden_sizes
SCREAMING_SNAKE_CASE_ : Dict = patch_sizes
SCREAMING_SNAKE_CASE_ : List[str] = strides
SCREAMING_SNAKE_CASE_ : Optional[int] = mlp_ratios
SCREAMING_SNAKE_CASE_ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] = classifier_dropout_prob
SCREAMING_SNAKE_CASE_ : str = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[int] = drop_path_rate
SCREAMING_SNAKE_CASE_ : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Tuple = decoder_hidden_size
        SCREAMING_SNAKE_CASE_ : Optional[Any] = snake_case__.get('reshape_last_stage' ,True )
SCREAMING_SNAKE_CASE_ : Any = semantic_loss_ignore_index
class lowerCAmelCase_ ( OnnxConfig ):
__a : int = version.parse("1.11" )
@property
def snake_case ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def snake_case ( self ):
return 1E-4
@property
def snake_case ( self ):
return 12
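# Minimal instantiation sketch, assuming the scrambled classes above map to
# the public SegformerConfig / SegformerOnnxConfig in transformers.
if __name__ == "__main__":
    from transformers import SegformerConfig

    config = SegformerConfig(num_labels=150)  # e.g. ADE20k semantic classes
    print(config.num_encoder_blocks, config.hidden_sizes)  # 4 [32, 64, 160, 256]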
| 706 |
from math import log
from scipy.constants import Boltzmann, physical_constants
UpperCamelCase__ : Any = 3_00 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
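# Self-contained example of the same formula (the module-level temperature
# above is bound to a scrambled name, so T is restated here); the
# silicon-like concentrations, in cm^-3, are illustrative values.
if __name__ == "__main__":
    from math import log
    from scipy.constants import Boltzmann, physical_constants

    T = 3_00  # K
    v_bi = Boltzmann * T * log((1e17 * 1e17) / (1.5e10) ** 2) / physical_constants["electron volt"][0]
    print(f"built-in voltage ~ {v_bi:.3f} V")  # about 0.81 V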
| 685 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class lowerCAmelCase_ ( ProcessorMixin ):
__a : Any = "SpeechT5FeatureExtractor"
__a : int = "SpeechT5Tokenizer"
def __init__( self ,snake_case__ ,snake_case__ ):
super().__init__(__lowerCAmelCase ,__lowerCAmelCase )
def __call__( self ,*snake_case__ ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('audio' ,__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('text' ,__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('text_target' ,__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('audio_target' ,__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('sampling_rate' ,__lowerCAmelCase )
if audio is not None and text is not None:
raise ValueError(
'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
if audio_target is not None and text_target is not None:
raise ValueError(
'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
if audio is not None:
SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor(__lowerCAmelCase ,*__lowerCAmelCase ,sampling_rate=__lowerCAmelCase ,**__lowerCAmelCase )
elif text is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = self.tokenizer(__lowerCAmelCase ,**__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = None
if audio_target is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.feature_extractor(audio_target=__lowerCAmelCase ,*__lowerCAmelCase ,sampling_rate=__lowerCAmelCase ,**__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = targets['input_values']
elif text_target is not None:
SCREAMING_SNAKE_CASE_ : int = self.tokenizer(__lowerCAmelCase ,**__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = targets['input_ids']
else:
SCREAMING_SNAKE_CASE_ : str = None
if inputs is None:
return targets
if targets is not None:
SCREAMING_SNAKE_CASE_ : Any = labels
SCREAMING_SNAKE_CASE_ : Tuple = targets.get('attention_mask' )
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE_ : str = decoder_attention_mask
return inputs
def snake_case ( self ,*snake_case__ ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('input_values' ,__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('input_ids' ,__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('labels' ,__lowerCAmelCase )
if input_values is not None and input_ids is not None:
raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
if input_values is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = self.feature_extractor.pad(__lowerCAmelCase ,*__lowerCAmelCase ,**__lowerCAmelCase )
elif input_ids is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer.pad(__lowerCAmelCase ,**__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = None
if labels is not None:
if "input_ids" in labels or (isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and "input_ids" in labels[0]):
SCREAMING_SNAKE_CASE_ : str = self.tokenizer.pad(__lowerCAmelCase ,**__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = targets['input_ids']
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.feature_extractor.feature_size
SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.num_mel_bins
SCREAMING_SNAKE_CASE_ : List[Any] = self.feature_extractor.pad(__lowerCAmelCase ,*__lowerCAmelCase ,**__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = feature_size_hack
SCREAMING_SNAKE_CASE_ : List[str] = targets['input_values']
else:
SCREAMING_SNAKE_CASE_ : int = None
if inputs is None:
return targets
if targets is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = labels
SCREAMING_SNAKE_CASE_ : Any = targets.get('attention_mask' )
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE_ : str = decoder_attention_mask
return inputs
def snake_case ( self ,*snake_case__ ,**snake_case__ ):
return self.tokenizer.batch_decode(*__lowerCAmelCase ,**__lowerCAmelCase )
def snake_case ( self ,*snake_case__ ,**snake_case__ ):
return self.tokenizer.decode(*__lowerCAmelCase ,**__lowerCAmelCase )
| 707 |
class OverFlowError ( Exception ):
    pass
class UnderFlowError ( Exception ):
    pass
class lowerCAmelCase_ :
def __init__( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[],
[],
[],
]
def snake_case ( self ,snake_case__ ,snake_case__ ):
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError('Maximum queue size is 100' )
self.queues[priority].append(snake_case__ )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def snake_case ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self ):
return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class lowerCAmelCase_ :
def __init__( self ):
SCREAMING_SNAKE_CASE_ : List[str] = []
def snake_case ( self ,snake_case__ ):
if len(self.queue ) == 100:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(snake_case__ )
def snake_case ( self ):
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = min(self.queue )
self.queue.remove(snake_case__ )
return data
def __str__( self ):
return str(self.queue )
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
print(lowerCamelCase_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowerCamelCase_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(lowerCamelCase_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowerCamelCase_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
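# For comparison, the standard library's heapq gives the element-priority
# behaviour in a few lines; this sketch is not a drop-in replacement for the
# classes above (no 100-item cap, and popping an empty heap raises
# IndexError instead of UnderFlowError).
import heapq

def heapq_demo() -> None:
    heap = []
    for value in (10, 70, 1_00, 1, 5, 7, 4, 64, 1_28):
        heapq.heappush(heap, value)
    print([heapq.heappop(heap) for _ in range(3)])  # [1, 4, 5]

if __name__ == "__main__":
    heapq_demo()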
| 685 | 0 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = s.rsplit(UpperCAmelCase__ , UpperCAmelCase__ )
return new.join(UpperCAmelCase__ )
def __UpperCAmelCase ( lowerCamelCase_ : Dict ) -> int:
"""simple docstring"""
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = {}
SCREAMING_SNAKE_CASE_ : Any = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
SCREAMING_SNAKE_CASE_ : Tuple = key.replace(F'{group_key}.' , F'{group_key}.group.' )
if "res_path" in key:
SCREAMING_SNAKE_CASE_ : Any = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
SCREAMING_SNAKE_CASE_ : int = rreplace(UpperCAmelCase__ , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
SCREAMING_SNAKE_CASE_ : int = rreplace(UpperCAmelCase__ , '.b' , '.bias' , 1 )
SCREAMING_SNAKE_CASE_ : int = value.float()
return upgrade
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Optional[Any]=True ) -> Tuple:
"""simple docstring"""
from dall_e import Encoder
SCREAMING_SNAKE_CASE_ : Tuple = Encoder()
if os.path.exists(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Tuple = torch.load(UpperCAmelCase__ )
else:
SCREAMING_SNAKE_CASE_ : Dict = torch.hub.load_state_dict_from_url(UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Dict = ckpt.state_dict()
encoder.load_state_dict(UpperCAmelCase__ )
if config_path is not None:
SCREAMING_SNAKE_CASE_ : Dict = FlavaImageCodebookConfig.from_pretrained(UpperCAmelCase__ )
else:
SCREAMING_SNAKE_CASE_ : Any = FlavaImageCodebookConfig()
SCREAMING_SNAKE_CASE_ : Optional[int] = FlavaImageCodebook(UpperCAmelCase__ ).eval()
SCREAMING_SNAKE_CASE_ : int = encoder.state_dict()
SCREAMING_SNAKE_CASE_ : Optional[Any] = upgrade_state_dict(UpperCAmelCase__ )
hf_model.load_state_dict(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = hf_model.state_dict()
SCREAMING_SNAKE_CASE_ : Dict = count_parameters(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = count_parameters(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 )
if save_checkpoint:
hf_model.save_pretrained(UpperCAmelCase__ )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCamelCase__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
UpperCamelCase__ : Optional[int] = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 708 |
def __UpperCAmelCase ( lowerCamelCase_ : int = 10_00 ) -> int:
"""simple docstring"""
return sum(e for e in range(3 , lowerCamelCase_ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
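# The same sum in O(1) via inclusion-exclusion over arithmetic series; a
# cross-check sketch for the brute-force version above. Helper names are
# illustrative.
def _sum_multiples_below(k: int, n: int) -> int:
    m = (n - 1) // k
    return k * m * (m + 1) // 2

def solution_closed_form(n: int = 1000) -> int:
    return (
        _sum_multiples_below(3, n)
        + _sum_multiples_below(5, n)
        - _sum_multiples_below(15, n)
    )

assert solution_closed_form(1000) == 233168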
| 685 | 0 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase_ ( ConfigTester ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ ,'hidden_sizes' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ ,'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ ,'num_attention_heads' ) )
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=32 ,snake_case__=2 ,snake_case__=3 ,snake_case__=640 ,snake_case__=4 ,snake_case__="silu" ,snake_case__=3 ,snake_case__=32 ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=0.02 ,snake_case__=True ,snake_case__=True ,snake_case__=10 ,snake_case__=None ,):
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Tuple = batch_size
SCREAMING_SNAKE_CASE_ : int = image_size
SCREAMING_SNAKE_CASE_ : Tuple = patch_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : str = last_hidden_size
SCREAMING_SNAKE_CASE_ : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE_ : Dict = conv_kernel_size
SCREAMING_SNAKE_CASE_ : Any = output_stride
SCREAMING_SNAKE_CASE_ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str = classifier_dropout_prob
SCREAMING_SNAKE_CASE_ : List[Any] = use_labels
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : int = num_labels
SCREAMING_SNAKE_CASE_ : Tuple = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[int] = scope
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : int = None
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case ( self ):
return MobileViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = MobileViTModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[Any] = MobileViTForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : int = model(lowerCAmelCase__ ,labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = MobileViTForSemanticSegmentation(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
SCREAMING_SNAKE_CASE_ : List[str] = model(lowerCAmelCase__ ,labels=lowerCAmelCase__ )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__a : str = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__a : List[Any] = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__a : Optional[int] = False
__a : List[str] = False
__a : List[str] = False
__a : Any = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = MobileViTModelTester(self )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MobileViTConfigTester(self ,config_class=lowerCAmelCase__ ,has_text_modality=lowerCAmelCase__ )
def snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def snake_case ( self ):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def snake_case ( self ):
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def snake_case ( self ):
pass
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,lowerCAmelCase__ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case ( self ):
pass
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def snake_case ( self ):
def check_hidden_states_output(snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : int = model(**self._prepare_for_class(lowerCAmelCase__ ,lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE_ : Any = outputs.hidden_states
SCREAMING_SNAKE_CASE_ : int = 5
self.assertEqual(len(lowerCAmelCase__ ) ,lowerCAmelCase__ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE_ : int = 2
for i in range(len(lowerCAmelCase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,)
divisor *= 2
self.assertEqual(self.model_tester.output_stride ,divisor // 2 )
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Dict = True
check_hidden_states_output(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
check_hidden_states_output(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase__ )
@slow
def snake_case ( self ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : str = MobileViTModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case ( self ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Dict = prepare_img()
SCREAMING_SNAKE_CASE_ : List[str] = image_processor(images=lowerCAmelCase__ ,return_tensors='pt' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[str] = model(**lowerCAmelCase__ )
# verify the logits
SCREAMING_SNAKE_CASE_ : Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase__ ,atol=1E-4 ) )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE_ : int = model.to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE_ : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE_ : Any = image_processor(images=lowerCAmelCase__ ,return_tensors='pt' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Tuple = model(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ : Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape ,lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] ,device=lowerCAmelCase__ ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,lowerCAmelCase__ ,atol=1E-4 ) )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE_ : Dict = model.to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE_ : Tuple = image_processor(images=lowerCAmelCase__ ,return_tensors='pt' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Tuple = model(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE_ : int = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__ ,target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape ,lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape ,lowerCAmelCase__ )
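# Shape arithmetic behind the hidden-state checks in the tests above (an
# explanatory note, not original code): with 5 feature maps and an initial
# divisor of 2, spatial sizes shrink as image_size/2, /4, /8, /16, /32, so the
# final divisor is 64 and the asserted output_stride equals 64 // 2 == 32,
# MobileViT's default output stride.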
| 709 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
| 685 | 0 |
import json
import sys
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] ) -> str:
"""simple docstring"""
with open(UpperCAmelCase__ , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Dict = json.load(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
for benchmark_name in sorted(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Tuple = results[benchmark_name]
SCREAMING_SNAKE_CASE_ : int = benchmark_name.split('/' )[-1]
output_md.append(F'### Benchmark: {benchmark_file_name}' )
SCREAMING_SNAKE_CASE_ : str = '| metric |'
SCREAMING_SNAKE_CASE_ : List[str] = '|--------|'
SCREAMING_SNAKE_CASE_ : Dict = '| new / old (diff) |'
for metric_name in sorted(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : List[str] = benchmark_res[metric_name]
SCREAMING_SNAKE_CASE_ : Optional[int] = metric_vals['new']
SCREAMING_SNAKE_CASE_ : Tuple = metric_vals.get('old' , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = metric_vals.get('diff' , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = F' {new_val:f}' if isinstance(UpperCAmelCase__ , (int, float) ) else 'None'
if old_val is not None:
val_str += F' / {old_val:f}' if isinstance(UpperCAmelCase__ , (int, float) ) else "None"
if dif_val is not None:
val_str += F' ({dif_val:f})' if isinstance(UpperCAmelCase__ , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append('</details>' )
with open(UpperCAmelCase__ , 'w' , encoding='utf-8' ) as f:
f.writelines('\n'.join(UpperCAmelCase__ ) )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = sys.argv[1]
UpperCamelCase__ : Dict = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
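# Hypothetical input shape this formatter expects (an illustrative sketch, not
# part of the original file):
# {
#     "benchmarks/benchmark_map.json": {
#         "load_time": {"new": 1.23, "old": 1.50, "diff": -0.27}
#     }
# }
# Each top-level key is one benchmark; every metric contributes a "new / old
# (diff)" cell to the per-benchmark markdown table built above.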
| 710 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCamelCase__ : Union[str, Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Any = []
UpperCamelCase__ : Optional[Any] = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
UpperCamelCase__ : Any = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'''emoji''': True,
},
}
]
UpperCamelCase__ : Union[str, Any] = 0
for log in Path().glob('''*.log'''):
UpperCamelCase__ : Optional[int] = 0
with open(log, '''r''') as f:
for line in f:
UpperCamelCase__ : Any = json.loads(line)
if line.get('''nodeid''', '''''') != "":
UpperCamelCase__ : Tuple = line['''nodeid''']
if line.get('''duration''', None) is not None:
UpperCamelCase__ : List[Any] = F"""{line["duration"]:.4f}"""
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCamelCase__ : Tuple = []
log.unlink()
UpperCamelCase__ : List[Any] = ''''''
UpperCamelCase__ : List[str] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Optional[int] = {}
for test in failed_tests:
UpperCamelCase__ : str = test[0].split('''::''')
UpperCamelCase__ : List[Any] = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
UpperCamelCase__ : int = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCamelCase__ : str = [test[0] for test in failed_table]
UpperCamelCase__ : Union[str, Any] = list(set(files))
# Count number of instances in failed_tests
UpperCamelCase__ : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCamelCase__ : str = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
UpperCamelCase__ : List[Any] = '''Too many failed tests, please see the full report in the Action results.'''
UpperCamelCase__ : Optional[Any] = len(err) + 10
UpperCamelCase__ : List[str] = message[: 30_00 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
UpperCamelCase__ : Optional[Any] = '''No failed tests! 🤗'''
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
UpperCamelCase__ : int = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCamelCase__ : Optional[Any] = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCamelCase__ : Tuple = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
UpperCamelCase__ : Any = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCamelCase__ : int = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCamelCase__ : str = row[0]
else:
UpperCamelCase__ : str = ''''''
UpperCamelCase__ : Optional[Any] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 685 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCamelCase__ : int = logging.get_logger(__name__)
class lowerCAmelCase_ ( __UpperCAmelCase ):
def __init__( self ,*snake_case__ ,**snake_case__ ):
warnings.warn(
'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use CLIPImageProcessor instead.' ,UpperCAmelCase_ ,)
super().__init__(*UpperCAmelCase_ ,**UpperCAmelCase_ )
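# Migration sketch implied by the warning above: callers should switch, e.g.
#   CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
# to
#   CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
# The subclass keeps the old name importable while delegating all behavior to
# CLIPImageProcessor, so the swap is drop-in.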
| 711 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError('Input value must be an \'int\' type' )
SCREAMING_SNAKE_CASE_ : Tuple = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
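# Quick self-check for the helper above (a sketch added for illustration; the
# obfuscated name __UpperCAmelCase is reused exactly as defined in this file).
# For any positive integer the returned position equals int.bit_length():
#   __UpperCAmelCase(1) == 1, __UpperCAmelCase(8) == 4, __UpperCAmelCase(17) == 5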
| 685 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UpperCamelCase__ : List[str] = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
UpperCamelCase__ : Optional[Any] = 'UperNetConfig'
class lowerCAmelCase_ ( nn.Module ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 0 ,snake_case__ = False ,snake_case__ = 1 ,):
super().__init__()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.Conv2d(
in_channels=UpperCamelCase__ ,out_channels=UpperCamelCase__ ,kernel_size=UpperCamelCase__ ,padding=UpperCamelCase__ ,bias=UpperCamelCase__ ,dilation=UpperCamelCase__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = nn.BatchNorm2d(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = nn.ReLU()
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.conv(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.batch_norm(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = self.activation(UpperCamelCase__ )
return output
class lowerCAmelCase_ ( nn.Module ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Dict = [
nn.AdaptiveAvgPool2d(UpperCamelCase__ ),
UperNetConvModule(UpperCamelCase__ ,UpperCamelCase__ ,kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(UpperCamelCase__ ) ,UpperCamelCase__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = input
for layer in self.layers:
SCREAMING_SNAKE_CASE_ : int = layer(UpperCamelCase__ )
return hidden_state
class lowerCAmelCase_ ( nn.Module ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Any = pool_scales
SCREAMING_SNAKE_CASE_ : List[Any] = align_corners
SCREAMING_SNAKE_CASE_ : List[Any] = in_channels
SCREAMING_SNAKE_CASE_ : str = channels
SCREAMING_SNAKE_CASE_ : List[Any] = []
for i, pool_scale in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = UperNetPyramidPoolingBlock(pool_scale=UpperCamelCase__ ,in_channels=UpperCamelCase__ ,channels=UpperCamelCase__ )
self.blocks.append(UpperCamelCase__ )
self.add_module(str(UpperCamelCase__ ) ,UpperCamelCase__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for ppm in self.blocks:
SCREAMING_SNAKE_CASE_ : Tuple = ppm(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Any = nn.functional.interpolate(
UpperCamelCase__ ,size=x.size()[2:] ,mode='bilinear' ,align_corners=self.align_corners )
ppm_outs.append(UpperCamelCase__ )
return ppm_outs
class lowerCAmelCase_ ( nn.Module ):
def __init__( self ,snake_case__ ,snake_case__ ):
super().__init__()
SCREAMING_SNAKE_CASE_ : str = config
SCREAMING_SNAKE_CASE_ : Dict = config.pool_scales # e.g. (1, 2, 3, 6)
SCREAMING_SNAKE_CASE_ : Any = in_channels
SCREAMING_SNAKE_CASE_ : Optional[Any] = config.hidden_size
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : int = nn.Conv2d(self.channels ,config.num_labels ,kernel_size=1 )
# PSP Module
SCREAMING_SNAKE_CASE_ : Tuple = UperNetPyramidPoolingModule(
self.pool_scales ,self.in_channels[-1] ,self.channels ,align_corners=self.align_corners ,)
SCREAMING_SNAKE_CASE_ : Optional[Any] = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,)
# FPN Module
SCREAMING_SNAKE_CASE_ : Tuple = nn.ModuleList()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
SCREAMING_SNAKE_CASE_ : int = UperNetConvModule(UpperCamelCase__ ,self.channels ,kernel_size=1 )
SCREAMING_SNAKE_CASE_ : List[str] = UperNetConvModule(self.channels ,self.channels ,kernel_size=3 ,padding=1 )
self.lateral_convs.append(UpperCamelCase__ )
self.fpn_convs.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = UperNetConvModule(
len(self.in_channels ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,)
def snake_case ( self ):
self.apply(self._init_weights )
def snake_case ( self ,snake_case__ ):
if isinstance(UpperCamelCase__ ,nn.Conv2d ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = inputs[-1]
SCREAMING_SNAKE_CASE_ : List[Any] = [x]
psp_outs.extend(self.psp_modules(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat(UpperCamelCase__ ,dim=1 )
SCREAMING_SNAKE_CASE_ : List[str] = self.bottleneck(UpperCamelCase__ )
return output
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(UpperCamelCase__ ) )
# build top-down path
SCREAMING_SNAKE_CASE_ : List[Any] = len(UpperCamelCase__ )
for i in range(used_backbone_levels - 1 ,0 ,-1 ):
SCREAMING_SNAKE_CASE_ : List[Any] = laterals[i - 1].shape[2:]
SCREAMING_SNAKE_CASE_ : str = laterals[i - 1] + nn.functional.interpolate(
laterals[i] ,size=UpperCamelCase__ ,mode='bilinear' ,align_corners=self.align_corners )
# build outputs
SCREAMING_SNAKE_CASE_ : List[str] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 ,0 ,-1 ):
SCREAMING_SNAKE_CASE_ : List[Any] = nn.functional.interpolate(
fpn_outs[i] ,size=fpn_outs[0].shape[2:] ,mode='bilinear' ,align_corners=self.align_corners )
SCREAMING_SNAKE_CASE_ : Any = torch.cat(UpperCamelCase__ ,dim=1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.fpn_bottleneck(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = self.classifier(UpperCamelCase__ )
return output
class lowerCAmelCase_ ( nn.Module ):
def __init__( self ,snake_case__ ,snake_case__ = 2 ,snake_case__ = 3 ,snake_case__ = 1 ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Dict = config
SCREAMING_SNAKE_CASE_ : List[str] = config.auxiliary_in_channels
SCREAMING_SNAKE_CASE_ : Optional[Any] = config.auxiliary_channels
SCREAMING_SNAKE_CASE_ : int = config.auxiliary_num_convs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = config.auxiliary_concat_input
SCREAMING_SNAKE_CASE_ : List[str] = in_index
SCREAMING_SNAKE_CASE_ : Tuple = (kernel_size // 2) * dilation
SCREAMING_SNAKE_CASE_ : str = []
convs.append(
UperNetConvModule(
self.in_channels ,self.channels ,kernel_size=UpperCamelCase__ ,padding=UpperCamelCase__ ,dilation=UpperCamelCase__ ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels ,self.channels ,kernel_size=UpperCamelCase__ ,padding=UpperCamelCase__ ,dilation=UpperCamelCase__ ) )
if self.num_convs == 0:
SCREAMING_SNAKE_CASE_ : Optional[int] = nn.Identity()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = nn.Sequential(*UpperCamelCase__ )
if self.concat_input:
SCREAMING_SNAKE_CASE_ : Any = UperNetConvModule(
self.in_channels + self.channels ,self.channels ,kernel_size=UpperCamelCase__ ,padding=kernel_size // 2 )
SCREAMING_SNAKE_CASE_ : List[Any] = nn.Conv2d(self.channels ,config.num_labels ,kernel_size=1 )
def snake_case ( self ):
self.apply(self._init_weights )
def snake_case ( self ,snake_case__ ):
if isinstance(UpperCamelCase__ ,nn.Conv2d ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = encoder_hidden_states[self.in_index]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.convs(UpperCamelCase__ )
if self.concat_input:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.conv_cat(torch.cat([hidden_states, output] ,dim=1 ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.classifier(UpperCamelCase__ )
return output
class lowerCAmelCase_ ( UpperCamelCase_ ):
__a : str = UperNetConfig
__a : Tuple = "pixel_values"
__a : Optional[Any] = True
def snake_case ( self ,snake_case__ ):
if isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def snake_case ( self ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def snake_case ( self ,snake_case__ ,snake_case__=False ):
if isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = value
UpperCamelCase__ : str = R"""
    Parameters:
        This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
        it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
        behavior.
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UpperCamelCase__ : int = R"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , UpperCamelCase_ , )
class lowerCAmelCase_ ( UpperCamelCase_ ):
def __init__( self ,snake_case__ ):
super().__init__(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
SCREAMING_SNAKE_CASE_ : List[Any] = UperNetHead(UpperCamelCase__ ,in_channels=self.backbone.channels )
SCREAMING_SNAKE_CASE_ : List[Any] = UperNetFCNHead(UpperCamelCase__ ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) )
@replace_return_docstrings(output_type=UpperCamelCase__ ,config_class=_CONFIG_FOR_DOC )
def snake_case ( self ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,):
SCREAMING_SNAKE_CASE_ : int = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ : Dict = output_attentions if output_attentions is not None else self.config.output_attentions
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.backbone.forward_with_filtered_kwargs(
UpperCamelCase__ ,output_hidden_states=UpperCamelCase__ ,output_attentions=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.feature_maps
SCREAMING_SNAKE_CASE_ : Optional[int] = self.decode_head(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = nn.functional.interpolate(UpperCamelCase__ ,size=pixel_values.shape[2:] ,mode='bilinear' ,align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = None
if self.auxiliary_head is not None:
SCREAMING_SNAKE_CASE_ : Any = self.auxiliary_head(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.functional.interpolate(
UpperCamelCase__ ,size=pixel_values.shape[2:] ,mode='bilinear' ,align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one' )
else:
# compute weighted loss
SCREAMING_SNAKE_CASE_ : str = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = loss_fct(UpperCamelCase__ ,UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = loss_fct(UpperCamelCase__ ,UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
SCREAMING_SNAKE_CASE_ : Optional[Any] = (logits,) + outputs[1:]
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=UpperCamelCase__ ,logits=UpperCamelCase__ ,hidden_states=outputs.hidden_states ,attentions=outputs.attentions ,)
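# Usage sketch for the model above, assuming the standard transformers API and
# the checkpoint listed at the top of this file:
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # (batch_size, num_labels, height, width)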
| 712 |
import qiskit
def __UpperCAmelCase ( lowerCamelCase_ : int = 2 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = qubits
# Using Aer's simulator
SCREAMING_SNAKE_CASE_ : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
# Creating a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE_ : str = qiskit.QuantumCircuit(lowerCamelCase_ , lowerCamelCase_ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , lowerCamelCase_ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , i )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(lowerCamelCase_ ) ) , list(range(lowerCamelCase_ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
SCREAMING_SNAKE_CASE_ : Tuple = qiskit.execute(lowerCamelCase_ , lowerCamelCase_ , shots=10_00 )
return job.result().get_counts(lowerCamelCase_ )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 685 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
UpperCamelCase__ : Tuple = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,*snake_case__ ,**snake_case__ ):
warnings.warn(
'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use DeformableDetrImageProcessor instead.' ,_SCREAMING_SNAKE_CASE ,)
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
| 713 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> bool:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('check_bouncy() accepts only integer arguments' )
SCREAMING_SNAKE_CASE_ : Optional[int] = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = ''.join(sorted(lowerCamelCase_ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __UpperCAmelCase ( lowerCamelCase_ : float = 99 ) -> int:
"""simple docstring"""
if not 0 < percent < 1_00:
raise ValueError('solution() only accepts values from 0 to 100' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Dict = 1
while True:
if check_bouncy(lowerCamelCase_ ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 685 | 0 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
UpperCamelCase__ : str = F"""https://www.google.com/search?q={query}&num=100"""
UpperCamelCase__ : str = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
UpperCamelCase__ : Any = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
UpperCamelCase__ : str = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
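# Note on the two parsing branches above (an interpretive sketch): the
# 'yuRUbf' container wraps organic results in Google's full desktop markup,
# while the 'kCrYT' fallback appears to handle the lightweight page served to
# simpler clients, whose anchors are redirect URLs carrying the real target in
# a 'url' query parameter, hence the parse_qs(...)["url"][0] extraction.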
| 714 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Dict = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Any = ['''ChineseCLIPFeatureExtractor''']
UpperCamelCase__ : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
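# How the lazy pattern above behaves (a brief explanatory sketch): a statement
# such as
#   from transformers.models.chinese_clip import ChineseCLIPModel
# goes through _LazyModule.__getattr__, which imports .modeling_chinese_clip
# only at that moment, so heavy backends like torch are not loaded until the
# symbol is actually used.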
| 685 | 0 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase__ : Optional[int] = logging.getLogger()
UpperCamelCase__ : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCAmelCase_ ( lowercase__ ):
def snake_case ( self ,snake_case__ ):
os.makedirs(__lowercase ,exist_ok=__lowercase )
SCREAMING_SNAKE_CASE_ : Any = {'source': 'What is love ?', 'target': 'life'}
SCREAMING_SNAKE_CASE_ : List[Any] = {'train': 12, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
SCREAMING_SNAKE_CASE_ : Dict = '\n'.join([contents[field]] * n_lines[split] )
with open(os.path.join(__lowercase ,F'{split}.{field}' ) ,'w' ) as f:
f.write(__lowercase )
def snake_case ( self ,snake_case__ ,snake_case__ = "pytorch" ):
SCREAMING_SNAKE_CASE_ : int = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ : int = os.path.join(__lowercase ,'output' )
SCREAMING_SNAKE_CASE_ : str = os.path.join(__lowercase ,'data' )
self._create_dummy_data(data_dir=__lowercase )
SCREAMING_SNAKE_CASE_ : int = F'--data_dir {data_dir} --output_dir {output_dir} --model_name_or_path facebook/rag-sequence-base --model_type rag_sequence --do_train --do_predict --n_val -1 --val_check_interval 1.0 --train_batch_size 2 --eval_batch_size 1 --max_source_length 25 --max_target_length 25 --val_max_target_length 25 --test_max_target_length 25 --label_smoothing 0.1 --dropout 0.1 --attention_dropout 0.1 --weight_decay 0.001 --adam_epsilon 1e-08 --max_grad_norm 0.1 --lr_scheduler polynomial --learning_rate 3e-04 --num_train_epochs 1 --warmup_steps 4 --gradient_accumulation_steps 1 --distributed-port 8787 --use_dummy_dataset 1 --distributed_retriever {distributed_retriever}'.split()
if gpus > 0:
testargs.append(F'--gpus={gpus}' )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
SCREAMING_SNAKE_CASE_ : Dict = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(__lowercase ,env=self.get_env() )
SCREAMING_SNAKE_CASE_ : int = os.path.join(__lowercase ,'metrics.json' )
with open(__lowercase ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = json.load(__lowercase )
return result
@require_torch_gpu
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] ,0.2 )
@require_torch_multi_gpu
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] ,0.2 )
@require_torch_gpu
@require_ray
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self._run_finetune(gpus=1 ,distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] ,0.2 )
@require_torch_multi_gpu
@require_ray
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self._run_finetune(gpus=1 ,distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] ,0.2 )
| 715 |
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> Tuple:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
else:
return a * actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(lowerCamelCase_ , lowerCamelCase_ )
return actual_power(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
print(power(-2, -3))
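# Why the negative-exponent branch above still works: int(b / 2) truncates
# toward zero, so actual_power(-2, -3) recurses exactly like actual_power(-2, 3)
# and returns (-2) ** 3 == -8; power(-2, -3) then yields 1 / -8 == -0.125,
# which matches (-2) ** -3.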
| 685 | 0 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class lowerCAmelCase_ ( __snake_case ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__ = None ,snake_case__ = False ,**snake_case__ ,):
super().__init__(features=A_ ,cache_dir=A_ ,keep_in_memory=A_ ,**A_ )
SCREAMING_SNAKE_CASE_ : List[Any] = Sql(
cache_dir=A_ ,features=A_ ,sql=A_ ,con=A_ ,**A_ ,)
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
SCREAMING_SNAKE_CASE_ : Tuple = None
self.builder.download_and_prepare(
download_config=A_ ,download_mode=A_ ,verification_mode=A_ ,base_path=A_ ,)
# Build dataset for splits
SCREAMING_SNAKE_CASE_ : Dict = self.builder.as_dataset(
split='train' ,verification_mode=A_ ,in_memory=self.keep_in_memory )
return dataset
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__ = None ,**snake_case__ ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
SCREAMING_SNAKE_CASE_ : Dict = dataset
SCREAMING_SNAKE_CASE_ : List[Any] = name
SCREAMING_SNAKE_CASE_ : Tuple = con
SCREAMING_SNAKE_CASE_ : Any = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
SCREAMING_SNAKE_CASE_ : Dict = num_proc
SCREAMING_SNAKE_CASE_ : str = to_sql_kwargs
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.to_sql_kwargs.pop('sql' ,A_ )
SCREAMING_SNAKE_CASE_ : int = self.to_sql_kwargs.pop('con' ,A_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.to_sql_kwargs.pop('index' ,A_ )
SCREAMING_SNAKE_CASE_ : List[Any] = self._write(index=A_ ,**self.to_sql_kwargs )
return written
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = args
SCREAMING_SNAKE_CASE_ : List[str] = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
SCREAMING_SNAKE_CASE_ : Tuple = query_table(
table=self.dataset.data ,key=slice(A_ ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = batch.to_pandas()
SCREAMING_SNAKE_CASE_ : str = df.to_sql(self.name ,self.con ,index=A_ ,**A_ )
return num_rows or len(A_ )
def snake_case ( self ,snake_case__ ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,A_ ,A_ )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += num_rows
return written
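# Usage sketch for the reader/writer pair above, via the public datasets API
# that wraps them (method names assumed from that API):
#   ds = Dataset.from_sql("SELECT * FROM states", "sqlite:///example.db")
#   ds.to_sql("states_copy", "sqlite:///example.db", batch_size=100)
# from_sql builds the reader from a SQL query or table name plus a connection
# URI; to_sql streams Arrow batches back out through pandas DataFrame.to_sql.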
| 716 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=7 ,snake_case__=True ,snake_case__=True ,snake_case__=False ,snake_case__=True ,snake_case__=99 ,snake_case__=32 ,snake_case__=5 ,snake_case__=4 ,snake_case__=37 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=16 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=3 ,snake_case__=4 ,snake_case__=None ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : List[Any] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = seq_length
SCREAMING_SNAKE_CASE_ : Tuple = is_training
SCREAMING_SNAKE_CASE_ : List[str] = use_input_mask
SCREAMING_SNAKE_CASE_ : List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = intermediate_size
SCREAMING_SNAKE_CASE_ : Dict = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : int = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[str] = num_choices
SCREAMING_SNAKE_CASE_ : Tuple = scope
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=snake_case__ ,initializer_range=self.initializer_range ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ ,attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ : List[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,use_cache=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE_ : str = ids_tensor((self.batch_size, 3) ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Dict = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,past_key_values=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__a : int = (LlamaForCausalLM,) if is_torch_available() else ()
__a : Any = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Tuple = False
__a : Tuple = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self ,config_class=snake_case__ ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : Optional[int] = type
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Dict = 3
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : str = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Any = 3
SCREAMING_SNAKE_CASE_ : int = 'single_label_classification'
SCREAMING_SNAKE_CASE_ : str = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Dict = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Tuple = 3
SCREAMING_SNAKE_CASE_ : str = 'multi_label_classification'
SCREAMING_SNAKE_CASE_ : int = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Tuple = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def snake_case ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([1, 10] ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE_ : int = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : List[Any] = original_model(snake_case__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : List[Any] = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE_ : int = LlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE_ : str = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : Optional[int] = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
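# Background for the scaling assertions above: rope_scaling {'type': 'linear',
# 'factor': f} divides position ids by f at every length, so even short inputs
# deviate from the unscaled model; {'type': 'dynamic'} (NTK-aware) only alters
# the rotary base once the input exceeds the original max_position_embeddings,
# which is exactly the short-input equality and long-input inequality checked.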
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : List[str] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_MEAN ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,EXPECTED_SLICE ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Dict = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' )
        SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(input_ids ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_MEAN ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,EXPECTED_SLICE ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' )
        SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(input_ids ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_MEAN ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_SLICE ,atol=1E-2 ,rtol=1E-2 )
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : str = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' )
        SCREAMING_SNAKE_CASE_ : int = model(torch.tensor(input_ids ) )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_MEAN ,atol=1E-2 ,rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,EXPECTED_SLICE ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
SCREAMING_SNAKE_CASE_ : List[str] = 'Simply put, the theory of relativity states that '
SCREAMING_SNAKE_CASE_ : str = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
        SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(prompt ,return_tensors='pt' )
        SCREAMING_SNAKE_CASE_ : Tuple = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=False )
        # greedy generation outputs
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.generate(input_ids ,max_new_tokens=64 ,top_p=None ,temperature=1 ,do_sample=False )
        SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(generated_ids[0] ,skip_special_tokens=True )
        self.assertEqual(EXPECTED_TEXT_COMPLETION ,text )
| 685 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCamelCase__ : Optional[Any] = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[str] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
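# A minimal sketch of the lazy-import pattern used by the module above,
# assuming only that attribute access should trigger the submodule import;
# `_LazySketch` is an illustrative stand-in for `_LazyModule`.
import importlib
import types

class _LazySketch(types.ModuleType):
    def __init__(self ,name ,import_structure ):
        super().__init__(name )
        # map every exported symbol to the submodule that defines it
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self ,item ):
        module = importlib.import_module(F'{self.__name__}.{self._symbol_to_module[item]}' )
        return getattr(module ,item )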
| 717 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
UpperCamelCase__ : int = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
UpperCamelCase__ : str = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Tuple ) -> List[str]:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Dict = collections.OrderedDict()
SCREAMING_SNAKE_CASE_ : Dict = collections.OrderedDict()
SCREAMING_SNAKE_CASE_ : List[Any] = collections.OrderedDict()
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Any = f.readlines()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
    for idx, b in enumerate(token ):
SCREAMING_SNAKE_CASE_ : Optional[int] = b
SCREAMING_SNAKE_CASE_ : Dict = idx
for wd in b:
SCREAMING_SNAKE_CASE_ : Any = idx
return vocab, raw_vocab, ids_to_tokens, emoji
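# A hedged illustration of the vocab format parsed above: one line can map
# several comma-separated surface forms to a single id (tokens are made up).
#
#   line 12 of vocab.txt: "こんにちは,こんにちわ"
#       vocab["こんにちは"] == vocab["こんにちわ"] == 12
#       ids_to_tokens[12]  == ["こんにちは", "こんにちわ"]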
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : List[str] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__="<|endoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__="<|startoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__=False ,**snake_case__ ,):
super().__init__(
unk_token=snake_case__ ,pad_token=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,do_clean_text=snake_case__ ,**snake_case__ ,)
if not os.path.isfile(snake_case__ ):
raise ValueError(
F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(snake_case__ ):
raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
SCREAMING_SNAKE_CASE_ : str = do_clean_text
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = load_vocab_and_emoji(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = SubWordJapaneseTokenizer(
vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
@property
def snake_case ( self ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def snake_case ( self ):
return dict(self.raw_vocab ,**self.added_tokens_encoder )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.tokenize(snake_case__ ,clean=self.do_clean_text )
def snake_case ( self ,snake_case__ ):
return self.vocab.get(snake_case__ ,self.vocab.get(self.unk_token ) )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.convert_id_to_token(snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = ''.join(snake_case__ ).strip()
return out_string
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = []
for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text ,add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
SCREAMING_SNAKE_CASE_ : List[Any] = input_ids[-self.model_max_length :]
return input_ids
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
if os.path.isdir(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
SCREAMING_SNAKE_CASE_ : Tuple = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
SCREAMING_SNAKE_CASE_ : str = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
SCREAMING_SNAKE_CASE_ : Dict = token_index
                writer.write(','.join(token ) + '\n' )
index += 1
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
            json.dump(self.emoji ,writer )
return vocab_file, emoji_file
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = vocab # same as swe
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_to_tokens # same as bpe
SCREAMING_SNAKE_CASE_ : Dict = emoji
        SCREAMING_SNAKE_CASE_ : int = np.max([len(w ) for w in self.vocab.keys()] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
SCREAMING_SNAKE_CASE_ : str = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
SCREAMING_SNAKE_CASE_ : str = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
SCREAMING_SNAKE_CASE_ : str = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
SCREAMING_SNAKE_CASE_ : int = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
SCREAMING_SNAKE_CASE_ : Tuple = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ):
return len(self.ids_to_tokens )
def snake_case ( self ,snake_case__ ):
        # the six patterns compiled in `__init__` (URL, e-mail, phone, two date formats, price)
        SCREAMING_SNAKE_CASE_ : Tuple = self.content_repatter1.sub('<URL>' ,snake_case__ )
        SCREAMING_SNAKE_CASE_ : Dict = self.content_repatter2.sub('<EMAIL>' ,content )
        SCREAMING_SNAKE_CASE_ : Tuple = self.content_repatter3.sub('<TEL>' ,content )
        SCREAMING_SNAKE_CASE_ : Any = self.content_repatter4.sub('<DATE>' ,content )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.content_repatter5.sub('<DATE>' ,content )
        SCREAMING_SNAKE_CASE_ : Tuple = self.content_repatter6.sub('<PRICE>' ,content )
SCREAMING_SNAKE_CASE_ : str = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = content.replace('<BLOCK><BLOCK>' ,'<BLOCK>' )
return content
def snake_case ( self ,snake_case__ ,snake_case__=False ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace(' ' ,'<SP>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace(' ' ,'<SP>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace('\r\n' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace('\n' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace('\r' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : List[str] = text.replace('\t' ,'<TAB>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace('—' ,'ー' )
SCREAMING_SNAKE_CASE_ : Optional[int] = text.replace('−' ,'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                SCREAMING_SNAKE_CASE_ : int = text.replace(k ,v )
        if clean:
            SCREAMING_SNAKE_CASE_ : str = self.clean_text(text )
        def check_symbol(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = x.encode()
            if len(snake_case__ ) == 1 and len(e ) == 2:
SCREAMING_SNAKE_CASE_ : str = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2A1 and c <= 0XC2BF)
or (c >= 0XC780 and c <= 0XC783)
or (c >= 0XCAB9 and c <= 0XCBBF)
or (c >= 0XCC80 and c <= 0XCDA2)
):
return True
return False
def checkuae(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = x.encode()
            if len(snake_case__ ) == 1 and len(e ) == 3:
SCREAMING_SNAKE_CASE_ : Dict = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE2_8080 and c <= 0XE2_B07F:
return True
return False
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : List[Any] = []
while pos < len(snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = min(len(snake_case__ ) ,pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
SCREAMING_SNAKE_CASE_ : List[Any] = [] # (token_id, token, pos)
            for e in range(end ,pos ,-1 ):
SCREAMING_SNAKE_CASE_ : str = text[pos:e]
if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the candidate with the smallest token_id is adopted
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = sorted(candidates ,key=lambda x : x[0] )[0]
                result.append(wd )
SCREAMING_SNAKE_CASE_ : Optional[int] = e
else:
SCREAMING_SNAKE_CASE_ : Any = pos + 1
SCREAMING_SNAKE_CASE_ : Optional[int] = text[pos:end]
                if check_symbol(snake_case__ ):
result.append('<KIGOU>' )
elif checkuae(snake_case__ ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
SCREAMING_SNAKE_CASE_ : int = end
return result
def snake_case ( self ,snake_case__ ,snake_case__="\n" ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : Dict = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : Dict = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(snake_case__ )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode('utf-8' ,errors='replace' ) )
        SCREAMING_SNAKE_CASE_ : int = ''.join(words )
return text
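# ---------------------------------------------------------------------------
# A minimal sketch of the greedy longest-match strategy implemented by
# `tokenize` above, assuming a plain dict vocabulary; unknown characters fall
# back to per-byte tokens, mirroring the `<|byte%d|>` scheme.
def greedy_tokenize(text ,vocab ,max_token_len ):
    pos, result = 0, []
    while pos < len(text ):
        for end in range(min(len(text ) ,pos + max_token_len ) ,pos ,-1 ):
            if text[pos:end] in vocab:
                result.append(text[pos:end] )
                pos = end
                break
        else:
            # byte fallback for a single out-of-vocabulary character
            result.extend('<|byte%d|>' % b for b in text[pos].encode('utf-8' ) )
            pos += 1
    return result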
| 685 | 0 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
__a : Tuple = BlenderbotSmallTokenizer
__a : List[Any] = False
def snake_case ( self ):
super().setUp()
SCREAMING_SNAKE_CASE_ : List[Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
SCREAMING_SNAKE_CASE_ : Dict = dict(zip(lowerCamelCase__ ,range(len(lowerCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Tuple = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
SCREAMING_SNAKE_CASE_ : Optional[int] = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCamelCase__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = "adapt act apte"
SCREAMING_SNAKE_CASE_ : Tuple = "adapt act apte"
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : Optional[Any] = "adapt act apte"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["adapt", "act", "ap@@", "te"]
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
SCREAMING_SNAKE_CASE_ : int = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,lowerCamelCase__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
SCREAMING_SNAKE_CASE_ : List[Any] = "I am a small frog."
SCREAMING_SNAKE_CASE_ : List[Any] = tok([src_text] ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ )["input_ids"]
SCREAMING_SNAKE_CASE_ : List[Any] = tok.batch_decode(lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ ,clean_up_tokenization_spaces=lowerCamelCase__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
SCREAMING_SNAKE_CASE_ : Tuple = "I am a small frog ."
SCREAMING_SNAKE_CASE_ : int = "."
SCREAMING_SNAKE_CASE_ : Optional[int] = tok(lowerCamelCase__ )["input_ids"]
SCREAMING_SNAKE_CASE_ : int = tok(lowerCamelCase__ )["input_ids"]
assert encoded[-1] == encoded_dot[0]
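# A minimal sketch of how merge rules such as "a p" from the list above drive
# BPE: repeatedly fuse the adjacent pair with the lowest merge rank (rank =
# line number in the merges file). Illustrative only, not the tested tokenizer.
def apply_bpe(word ,merge_ranks ):
    symbols = list(word )
    while len(symbols ) > 1:
        pairs = [(merge_ranks.get((a, b) ,float('inf' ) ), i) for i, (a, b) in enumerate(zip(symbols ,symbols[1:] ) )]
        rank, i = min(pairs )
        if rank == float('inf' ):
            break  # no applicable merge left
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols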
| 718 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : int=() , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Optional[int]="no" , lowerCamelCase_ : Optional[Any]="29500" ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
SCREAMING_SNAKE_CASE_ : str = True
elif "IPython" in sys.modules:
SCREAMING_SNAKE_CASE_ : Dict = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
SCREAMING_SNAKE_CASE_ : Optional[int] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            F'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , lowerCamelCase_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 8
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='TPU' )
print(F'Launching a training on {num_processes} TPU cores.' )
        xmp.spawn(launcher , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*lowerCamelCase_ )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=lowerCamelCase_ , master_addr='127.0.0.1' , master_port=lowerCamelCase_ , mixed_precision=lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='MULTI_GPU' )
print(F'Launching training on {num_processes} GPUs.' )
try:
                    start_processes(launcher , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
SCREAMING_SNAKE_CASE_ : Optional[Any] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*lowerCamelCase_ )
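# A hedged usage example for the launcher above (assuming it mirrors
# accelerate's `notebook_launcher`): the whole training loop lives inside the
# launched function, and workers are forked from the notebook process.
# `train` and its argument are made up for illustration.
#
#   def train(learning_rate ):
#       ...  # build Accelerator(), model and optimizer inside this function
#
#   __UpperCAmelCase(train ,(3E-4,) ,2 )  # function, args, num_processes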
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple=() , lowerCamelCase_ : str=2 ) -> Union[str, Any]:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=lowerCamelCase_ , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
SCREAMING_SNAKE_CASE_ : Tuple = PrepareForLaunch(lowerCamelCase_ , debug=lowerCamelCase_ )
            start_processes(launcher , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
| 685 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
UpperCamelCase__ : int = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
UpperCamelCase__ : str = {
'''169M''': 7_68,
'''430M''': 10_24,
'''1B5''': 20_48,
'''3B''': 25_60,
'''7B''': 40_96,
'''14B''': 51_20,
}
def __UpperCAmelCase ( lowerCamelCase_ ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = list(state_dict.keys() )
for name in state_dict_keys:
        SCREAMING_SNAKE_CASE_ : Optional[int] = state_dict.pop(name )
# emb -> embedding
if name.startswith('emb.' ):
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
SCREAMING_SNAKE_CASE_ : Any = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , name )
        # ffn -> feed_forward
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , name )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
SCREAMING_SNAKE_CASE_ : str = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = name.replace('.time_mix_v' , '.time_mix_value' )
        # time_mix_r -> time_mix_receptance and reshape
if name.endswith('.time_mix_r' ):
SCREAMING_SNAKE_CASE_ : int = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
SCREAMING_SNAKE_CASE_ : List[str] = 'rwkv.' + name
SCREAMING_SNAKE_CASE_ : str = weight
return state_dict
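# A quick illustration of the key renaming performed above (the tensor value
# `t` is a placeholder):
#
#   {"blocks.3.att.time_mix_k": t}  ->  {"rwkv.blocks.3.attention.time_mix_key": t}
#   {"emb.weight": t}               ->  {"rwkv.embeddings.weight": t}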
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=False , lowerCamelCase_=None ) -> int:
"""simple docstring"""
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
SCREAMING_SNAKE_CASE_ : List[str] = 5_02_77
SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
SCREAMING_SNAKE_CASE_ : List[str] = PreTrainedTokenizerFast(tokenizer_file=lowercase__ )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(tokenizer )
tokenizer.save_pretrained(lowercase__ )
# 2. Build the config
SCREAMING_SNAKE_CASE_ : Dict = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
SCREAMING_SNAKE_CASE_ : Tuple = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' )
SCREAMING_SNAKE_CASE_ : Optional[int] = RwkvConfig(
vocab_size=lowercase__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(lowercase__ )
# 3. Download model file then convert state_dict
SCREAMING_SNAKE_CASE_ : Dict = hf_hub_download(lowercase__ , lowercase__ )
    SCREAMING_SNAKE_CASE_ : int = torch.load(model_file , map_location='cpu' )
    SCREAMING_SNAKE_CASE_ : List[Any] = convert_state_dict(state_dict )
    # 4. Split in shards and save
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(lowercase__ , shard_file ) )
    if index is not None:
        SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(lowercase__ , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(save_index_file , 'w' , encoding='utf-8' ) as f:
            SCREAMING_SNAKE_CASE_ : str = json.dumps(index , indent=2 , sort_keys=True ) + '\n'
            f.write(content )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.' )
SCREAMING_SNAKE_CASE_ : Optional[int] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
        SCREAMING_SNAKE_CASE_ : List[str] = torch.load(os.path.join(lowercase__ , shard_file ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(lowercase__ , shard_file ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
SCREAMING_SNAKE_CASE_ : str = AutoModelForCausalLM.from_pretrained(lowercase__ )
model.push_to_hub(lowercase__ , max_shard_size='2GB' )
tokenizer.push_to_hub(lowercase__ )
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
UpperCamelCase__ : List[Any] = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
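# Example invocation (repo and file names below are hypothetical):
#
#   python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M.pth --output_dir ./rwkv-169m --size 169M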
| 719 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase__ : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 685 | 0 |
'''simple docstring'''
import math
class lowerCAmelCase_ :
def snake_case ( self ,snake_case__ ,snake_case__ ):
        SCREAMING_SNAKE_CASE_ : Dict = 0.0
        SCREAMING_SNAKE_CASE_ : Tuple = 0.0
        for i in range(len(__a ) ):
            da += math.pow((sample[i] - weights[0][i]) ,2 )
            db += math.pow((sample[i] - weights[1][i]) ,2 )
        # compare the squared distances to the two weight vectors
        return 0 if da > db else 1
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
for i in range(len(__a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
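# One update step nudges the winning weight vector toward the sample:
#
#   w_j[i] <- w_j[i] + alpha * (sample[i] - w_j[i])
#
# e.g. with alpha = 0.5, w = [0.2, 0.6] and sample = [1, 1], the winning
# vector moves to [0.6, 0.8].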
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
SCREAMING_SNAKE_CASE_ : int = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
SCREAMING_SNAKE_CASE_ : Optional[int] = SelfOrganizingMap()
SCREAMING_SNAKE_CASE_ : List[Any] = 3
SCREAMING_SNAKE_CASE_ : Tuple = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            SCREAMING_SNAKE_CASE_ : Any = training_samples[j]
            # Compute the winning vector
            SCREAMING_SNAKE_CASE_ : Optional[int] = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            SCREAMING_SNAKE_CASE_ : List[Any] = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    SCREAMING_SNAKE_CASE_ : Dict = [0, 0, 0, 1]
    SCREAMING_SNAKE_CASE_ : Dict = self_organizing_map.get_winner(weights , sample )
# results
print(F'Clusters that the test sample belongs to : {winner}' )
print(F'Weights that have been trained : {weights}' )
# running the main() function
if __name__ == "__main__":
main()
| 720 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Union[str, Any] = CLIPTokenizer
__a : List[str] = CLIPTokenizerFast
__a : List[str] = True
__a : Tuple = {}
__a : Tuple = False
def snake_case ( self ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE_ : Any = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
SCREAMING_SNAKE_CASE_ : Dict = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(text )
                SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(text )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE_ : Dict = 'xa\u0303y' + ' ' + 'x\xe3y'
                SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(text )
                SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(text )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
                    '\u200E', # (left-to-right mark)
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
                    SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(unicode_seq )
                    SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.tokenize(unicode_seq )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
                    SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(unicode_seq )
                    SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.tokenize(unicode_seq )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Tuple = F'{text_of_1_token} {text_of_1_token}'
                SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name ,use_fast=True ,)
                SCREAMING_SNAKE_CASE_ : str = tokenizer_r(text ,return_offsets_mapping=True ,add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] ,(0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,(len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) ,)
                SCREAMING_SNAKE_CASE_ : Union[str, Any] = F' {text}'
                SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name ,use_fast=True ,)
                SCREAMING_SNAKE_CASE_ : int = tokenizer_r(text ,return_offsets_mapping=True ,add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,(1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
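# The "xa\u0303y" vs "x\xe3y" comparison above relies on canonical
# equivalence: both spellings of "ã" compose to the same NFC form.
#
#   >>> import unicodedata
#   >>> unicodedata.normalize('NFC' ,'a\u0303' ) == '\xe3'
#   True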
| 685 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : List[str] = MvpTokenizer
__a : Dict = MvpTokenizerFast
__a : str = True
__a : List[Any] = filter_roberta_detectors
def snake_case ( self ):
super().setUp()
SCREAMING_SNAKE_CASE_ : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE_ : List[str] = dict(zip(UpperCAmelCase__ ,range(len(UpperCAmelCase__ ) ) ) )
SCREAMING_SNAKE_CASE_ : int = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE_ : str = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCAmelCase__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase__ )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase__ )
def snake_case ( self ,snake_case__ ):
return "lower newer", "lower newer"
@cached_property
def snake_case ( self ):
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def snake_case ( self ):
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
SCREAMING_SNAKE_CASE_ : Dict = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer(UpperCAmelCase__ ,max_length=len(UpperCAmelCase__ ) ,padding=UpperCAmelCase__ ,return_tensors='pt' )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
SCREAMING_SNAKE_CASE_ : Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase__ ,UpperCAmelCase__ )
# Test that special tokens are reset
@require_torch
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer(UpperCAmelCase__ ,padding=UpperCAmelCase__ ,return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' ,UpperCAmelCase__ )
self.assertIn('attention_mask' ,UpperCAmelCase__ )
self.assertNotIn('labels' ,UpperCAmelCase__ )
self.assertNotIn('decoder_attention_mask' ,UpperCAmelCase__ )
@require_torch
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(text_target=UpperCAmelCase__ ,max_length=32 ,padding='max_length' ,return_tensors='pt' )
self.assertEqual(32 ,targets['input_ids'].shape[1] )
@require_torch
def snake_case ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] ,padding=UpperCAmelCase__ ,truncation=UpperCAmelCase__ ,return_tensors='pt' )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
self.assertEqual(batch.input_ids.shape ,(2, 1024) )
@require_torch
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = ['A long paragraph for summarization.']
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : Dict = tokenizer(UpperCAmelCase__ ,text_target=UpperCAmelCase__ ,return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = inputs['input_ids']
SCREAMING_SNAKE_CASE_ : Optional[int] = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def snake_case ( self ):
pass
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ ,**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = self.tokenizer_class.from_pretrained(UpperCAmelCase__ ,**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE_ : Any = tokenizer_r.encode_plus(UpperCAmelCase__ ,add_special_tokens=UpperCAmelCase__ ,return_token_type_ids=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = tokenizer_p.encode_plus(UpperCAmelCase__ ,add_special_tokens=UpperCAmelCase__ ,return_token_type_ids=UpperCAmelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) ,sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) ,)
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] ,[0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] ,[0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase__ ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
UpperCAmelCase__ ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 721 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
SCREAMING_SNAKE_CASE_ : int = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE_ : Optional[Any] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 685 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
UpperCamelCase__ : Tuple = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowerCAmelCase_ ( __a ):
__a : List[str] = "luke"
def __init__( self ,snake_case__=50267 ,snake_case__=500000 ,snake_case__=768 ,snake_case__=256 ,snake_case__=12 ,snake_case__=12 ,snake_case__=3072 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=1E-12 ,snake_case__=True ,snake_case__=None ,snake_case__=1 ,snake_case__=0 ,snake_case__=2 ,**snake_case__ ,):
super().__init__(pad_token_id=A__ ,bos_token_id=A__ ,eos_token_id=A__ ,**A__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE_ : Tuple = entity_vocab_size
SCREAMING_SNAKE_CASE_ : int = hidden_size
SCREAMING_SNAKE_CASE_ : List[str] = entity_emb_size
SCREAMING_SNAKE_CASE_ : str = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE_ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Union[str, Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : str = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Any = use_entity_aware_attention
SCREAMING_SNAKE_CASE_ : List[Any] = classifier_dropout
| 700 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : Dict = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'https://pypi.org/pypi/diffusers/json'
    SCREAMING_SNAKE_CASE_ : Optional[int] = json.loads(request.urlopen(url ).read() )['releases'].keys()
    return sorted(releases , key=lambda x : version.Version(x ) )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE , exist_ok=True )
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(HF_MODULES_CACHE ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] ) -> Any:
"""simple docstring"""
init_hf_modules()
    SCREAMING_SNAKE_CASE_ : int = Path(HF_MODULES_CACHE ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path , exist_ok=True )
SCREAMING_SNAKE_CASE_ : Optional[Any] = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = f.read()
# Imports of the form `import .xxx`
    SCREAMING_SNAKE_CASE_ : Tuple = re.findall(R'^\s*import\s+\.(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(R'^\s*from\s+\.(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase_ ) )
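# A quick illustration of what the two patterns above extract from a
# hypothetical module source:
#
#   "import .utils\nfrom .pipeline_a import PipelineA\nimport torch\n"
#   -> relative imports found: ['utils', 'pipeline_a']  (plain `import torch` is ignored)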
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [module_file]
SCREAMING_SNAKE_CASE_ : Tuple = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE_ : int = []
for f in files_to_check:
            new_imports.extend(get_relative_imports(f ) )
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ).parent
SCREAMING_SNAKE_CASE_ : int = [str(module_path / m ) for m in new_imports]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE_ : Any = [F'{f}.py' for f in new_import_files]
        SCREAMING_SNAKE_CASE_ : Optional[int] = len(new_import_files ) == 0
        all_relative_imports.extend(new_import_files )
return all_relative_imports
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] = f.read()
# Imports of the form `import xxx`
    SCREAMING_SNAKE_CASE_ : List[str] = re.findall(R'^\s*import\s+(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(R'^\s*from\s+(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
SCREAMING_SNAKE_CASE_ : List[str] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(set(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : List[str] = []
for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
if len(lowerCamelCase_ ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
F'{", ".join(lowerCamelCase_ )}. Run `pip install {" ".join(lowerCamelCase_ )}`' )
return get_relative_imports(lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = module_path.replace(os.path.sep , '.' )
    SCREAMING_SNAKE_CASE_ : Any = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module , class_name )
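# A minimal sketch of the dynamic lookup performed above, assuming the module
# file has already been copied under HF_MODULES_CACHE (names are illustrative):
#
#   module = importlib.import_module('diffusers_modules.local.my_pipeline' )
#   cls = getattr(module ,'MyPipeline' )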
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
from ..pipelines import DiffusionPipeline
SCREAMING_SNAKE_CASE_ : List[Any] = dict(inspect.getmembers(lowerCamelCase_ , inspect.isclass ) )
SCREAMING_SNAKE_CASE_ : List[str] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
            and issubclass(cls , DiffusionPipeline )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
SCREAMING_SNAKE_CASE_ : Any = cls
return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """Resolve `module_file` from a local folder, a GitHub community pipeline or a Hub repo, then cache it."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = 'local'
    elif pretrained_model_name_or_path.count('/') == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = 'v' + '.'.join(__version__.split('.')[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else 'main'
            logger.info(F'Defaulting to latest_version: {revision}.' )
        elif revision in available_versions:
            revision = F'v{revision}'
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
                F' {", ".join(available_versions + ["main"] )}.' )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False, )
            submodule = 'git'
            module_file = pretrained_model_name_or_path + '.py'
        except EnvironmentError:
            logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join('local', '--'.join(pretrained_model_name_or_path.split('/')))
        except EnvironmentError:
            logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = F'{module_needed}.py'
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports cached.
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, F'{module_needed}.py', cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
) -> str:
    """Extract a class from a module file, present in a local folder or in a repository of a model."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace('.py', ''))
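# Added usage sketch (the repo id below is hypothetical and the call needs
# network access, so it is left as a comment rather than executable code):
#
#   pipeline_cls = get_class_from_dynamic_module(
#       'some-user/some-community-pipeline',  # assumed Hub repo id
#       'pipeline.py',
#   )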
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    """Partition a[left_index:right_index] around a[left_index]; return the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1
def quick_sort_random(a: list, left: int, right: int) -> None:
    """Sort a[left:right] in place, choosing the pivot at random."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point
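# Added check (not in the original script): the sorted order is unique, so
# this holds for any random pivot sequence.
_demo = [5, 3, 8, 1, 9, 2]
quick_sort_random(_demo, 0, len(_demo))
assert _demo == [1, 2, 3, 5, 8, 9]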
def main() -> None:
    """Read comma-separated numbers from stdin and print them sorted."""
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    quick_sort_random(unsorted, 0, len(unsorted))
    print(unsorted)
if __name__ == "__main__":
main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[int] = "visual_bert"
def __init__( self ,snake_case__=30522 ,snake_case__=768 ,snake_case__=512 ,snake_case__=12 ,snake_case__=12 ,snake_case__=3072 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=1E-12 ,snake_case__=False ,snake_case__=True ,snake_case__=1 ,snake_case__=0 ,snake_case__=2 ,**snake_case__ ,):
super().__init__(pad_token_id=snake_case__ ,bos_token_id=snake_case__ ,eos_token_id=snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = visual_embedding_dim
SCREAMING_SNAKE_CASE_ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : int = bypass_transformer
SCREAMING_SNAKE_CASE_ : Optional[Any] = special_visual_initialize
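# Added sketch (upstream this class is `VisualBertConfig`; kept as a comment
# because it relies on the upstream keyword names rather than the ones above):
#
#   config = VisualBertConfig(hidden_size=64, num_hidden_layers=2, visual_embedding_dim=128)
#   assert config.model_type == 'visual_bert'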
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=7 ,snake_case__=True ,snake_case__=True ,snake_case__=True ,snake_case__=True ,snake_case__=99 ,snake_case__=64 ,snake_case__=32 ,snake_case__=5 ,snake_case__=4 ,snake_case__=37 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=16 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=3 ,snake_case__=4 ,snake_case__=None ,):
SCREAMING_SNAKE_CASE_ : Tuple = parent
SCREAMING_SNAKE_CASE_ : str = batch_size
SCREAMING_SNAKE_CASE_ : List[Any] = seq_length
SCREAMING_SNAKE_CASE_ : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE_ : List[Any] = use_input_mask
SCREAMING_SNAKE_CASE_ : List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Tuple = use_labels
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE_ : Tuple = embedding_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = max_position_embeddings
SCREAMING_SNAKE_CASE_ : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : List[str] = initializer_range
SCREAMING_SNAKE_CASE_ : int = num_labels
SCREAMING_SNAKE_CASE_ : Any = num_choices
SCREAMING_SNAKE_CASE_ : List[str] = scope
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return MegatronBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = MegatronBertModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ : Tuple = model(__UpperCamelCase ,token_type_ids=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = MegatronBertForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : str = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = MegatronBertForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = MegatronBertForNextSentencePrediction(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = MegatronBertForPreTraining(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase ,next_sentence_label=__UpperCamelCase ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = MegatronBertForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,start_positions=__UpperCamelCase ,end_positions=__UpperCamelCase ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = self.num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = MegatronBertForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = self.num_labels
SCREAMING_SNAKE_CASE_ : Dict = MegatronBertForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.num_choices
SCREAMING_SNAKE_CASE_ : Any = MegatronBertForMultipleChoice(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def snake_case ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
__a : str = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
__a : List[Any] = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Optional[int] = True
# test_resize_embeddings = False
__a : Tuple = False
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__=False ):
SCREAMING_SNAKE_CASE_ : str = super()._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ,return_labels=__UpperCamelCase )
if return_labels:
if model_class in get_values(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ : List[str] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=__UpperCamelCase )
return inputs_dict
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MegatronBertModelTester(self )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__UpperCamelCase )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__UpperCamelCase )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__UpperCamelCase )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__UpperCamelCase )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__UpperCamelCase )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__UpperCamelCase )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__UpperCamelCase )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__UpperCamelCase )
def _long_tensor(tok_lst):
    """Wrap a nested list of token ids in a LongTensor on the test device."""
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
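# Added note: e.g. _long_tensor([[101, 102, 103]]) yields shape (1, 3) with
# dtype torch.int64 on `torch_device`.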
UpperCamelCase__ : List[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
@unittest.skip('Model is not available.' )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
SCREAMING_SNAKE_CASE_ : str = os.path.join(os.environ['MYDIR'] ,__UpperCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = MegatronBertModel.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
model.half()
SCREAMING_SNAKE_CASE_ : Any = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Any = model(__UpperCamelCase )[0]
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape ,__UpperCamelCase )
SCREAMING_SNAKE_CASE_ : Tuple = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = output[0, ii, jj]
SCREAMING_SNAKE_CASE_ : List[str] = expected[3 * ii + jj]
SCREAMING_SNAKE_CASE_ : List[Any] = 'ii={} jj={} a={} b={}'.format(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
self.assertTrue(math.isclose(__UpperCamelCase ,__UpperCamelCase ,rel_tol=__UpperCamelCase ,abs_tol=__UpperCamelCase ) ,msg=__UpperCamelCase )
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling uniform points in the square [-1, 1] x [-1, 1]."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F'The estimated value of pi is {pi_estimate}')
    print(F'The value of pi from the math module is {pi}')
    print(F'The total error is {abs(pi - pi_estimate)}')
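# Added note (back-of-envelope, not from the original): the estimator's
# standard error is 4 * sqrt(p * (1 - p) / n) with p = pi / 4, roughly 0.0016
# for n = 1_000_000 samples, shrinking as O(1 / sqrt(n)).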
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of the function over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator on y = x, whose exact integral is (b^2 - a^2) / 2."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************')
    print(F'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(F'Estimated value is {estimated_value}')
    print(F'Expected value is {expected_value}')
    print(F'Total error is {abs(estimated_value - expected_value)}')
    print('******************')
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under sqrt(4 - x^2) on [0, 2] (a quarter circle of radius 2)."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(F'Estimated value is {estimated_value}')
    print(F'Expected value is {pi}')
    print(F'Total error is {abs(estimated_value - pi)}')
    print('******************')
if __name__ == "__main__":
import doctest
doctest.testmod()
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (positive integers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]
    ia, ib, ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1, n):
        next_num = min(next_a, next_b, next_c)
        ugly_nums.append(next_num)
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
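# Added check: the first ten ugly numbers are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12.
assert ugly_numbers(10) == 12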
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
    print(F"{ugly_numbers(200) = }")
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=7 ,snake_case__=3 ,snake_case__=18 ,snake_case__=30 ,snake_case__=400 ,snake_case__=True ,snake_case__=None ,snake_case__=True ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE_ : int = max_resolution
SCREAMING_SNAKE_CASE_ : Dict = do_resize
SCREAMING_SNAKE_CASE_ : Dict = size
SCREAMING_SNAKE_CASE_ : str = apply_ocr
def snake_case ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = LayoutLMvaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
self.assertTrue(hasattr(snake_case__ ,'apply_ocr' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,snake_case__ )
self.assertIsInstance(encoding.boxes ,snake_case__ )
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# with apply_OCR = True
SCREAMING_SNAKE_CASE_ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
SCREAMING_SNAKE_CASE_ : str = Image.open(ds[0]['file'] ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : Any = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
# with apply_OCR = False
SCREAMING_SNAKE_CASE_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
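# Added note (based on the upstream processor, stated as an assumption): the
# word boxes above live on LayoutLM's normalized 0-1000 coordinate grid (the
# processor rescales Tesseract's pixel boxes), which is why every coordinate
# falls in [0, 1000] regardless of the input image size.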
import random


def random_graph(
    vertices_number: int, probability: float, directed: bool = False
) -> dict:
    """Generate a random graph (adjacency dict) where each possible edge appears with `probability`."""
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with vertices_number vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
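# Added check: in a complete graph every vertex is adjacent to all the others.
assert complete_graph(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}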
if __name__ == "__main__":
import doctest
doctest.testmod()
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ : str = logging.getLogger(__name__)
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : str
__a : str
__a : Optional[str] = None
__a : Optional[str] = None
__a : Optional[str] = None
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : List[int]
__a : Optional[List[int]] = None
__a : Optional[List[int]] = None
__a : Optional[Union[int, float]] = None
__a : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
snake_case__ ,'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(snake_case__ ) ,snake_case__ ,) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : Dict = cached_features_file + '.lock'
with FileLock(snake_case__ ):
if os.path.exists(snake_case__ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(snake_case__ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
SCREAMING_SNAKE_CASE_ : List[Any] = (
processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
)
logger.info('Training examples: %s' ,len(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
logger.info('Saving features into cached file %s' ,snake_case__ )
torch.save(self.features ,snake_case__ )
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ :
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 128 ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list
SCREAMING_SNAKE_CASE_ : int = processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(snake_case__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
SCREAMING_SNAKE_CASE_ : List[Any] = tf.data.Dataset.from_generator(
snake_case__ ,(
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_train_set.txt' ) ) ,'train' )
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
def snake_case ( self ):
return ["contradiction", "entailment", "neutral"]
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i, line in enumerate(snake_case__ ):
if i == 0:
continue
SCREAMING_SNAKE_CASE_ : List[str] = '%s-%s' % (set_type, line[0])
SCREAMING_SNAKE_CASE_ : Dict = line[5]
SCREAMING_SNAKE_CASE_ : Dict = line[6]
SCREAMING_SNAKE_CASE_ : Tuple = line[7][2:] if line[7].startswith('ex' ) else line[7]
SCREAMING_SNAKE_CASE_ : Optional[int] = line[0]
examples.append(InputExample(guid=snake_case__ ,text_a=snake_case__ ,text_b=snake_case__ ,label=snake_case__ ,pairID=snake_case__ ) )
return examples
def __UpperCAmelCase ( lowerCamelCase_ : List[InputExample] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : PreTrainedTokenizer , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase_ )}
SCREAMING_SNAKE_CASE_ : Dict = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase_ ) , desc='convert examples to features' ):
        if ex_index % 10000 == 0:
logger.info('Writing example %d' % (ex_index) )
SCREAMING_SNAKE_CASE_ : Any = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' , truncation=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = label_map[example.label] if example.label in label_map else 0
SCREAMING_SNAKE_CASE_ : List[str] = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase_ , label=lowerCamelCase_ , pairID=lowerCamelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
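# Added check (illustrative): the label -> id mapping built above for HANS.
assert {label: i for i, label in enumerate(['contradiction', 'entailment', 'neutral'])} == {
    'contradiction': 0,
    'entailment': 1,
    'neutral': 2,
}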
UpperCamelCase__ : str = {
'''hans''': 3,
}
UpperCamelCase__ : Dict = {
'''hans''': HansProcessor,
}
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
UpperCamelCase__ : List[str] = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCamelCase__ : List[Any] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCamelCase__ : Tuple = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score `item` by the number of positions that match `main_target`."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
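# Added check: four of the five positions match the target.
assert evaluate('abXde', 'abcde') == ('abXde', 4.0)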
def crossover(parent_a: str, parent_b: str) -> tuple[str, str]:
    """Slice both parents at a random point and splice the halves together."""
    random_slice = random.randint(0, len(parent_a) - 1)
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of the child."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return ''.join(child_list)
def select(
    parent_a: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Mate `parent_a` with random high-scoring parents and return the mutated children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_b = population_score[random.randint(0, N_SELECTED)][0]
        child_a, child_b = crossover(parent_a[0], parent_b)
        # Append new string to the population list.
        pop.append(mutate(child_a, genes))
        pop.append(mutate(child_b, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings toward `target`; returns (generation, total population, best string)."""
    if N_POPULATION < N_SELECTED:
        raise ValueError(F'{N_POPULATION} must be bigger than {N_SELECTED}')
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(F'{not_in_genes_list} is not in genes list, evolution cannot converge')

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append(''.join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                F'\nGeneration: {generation}'
                F'\nTotal Population:{total_population}'
                F'\nBest score: {population_score[0][1]}'
                F'\nBest string: {population_score[0][0]}' )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
    )
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size', [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize('input_in_memory_max_size', ['default', 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """is_small_dataset must honour the configured in-memory cap."""
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, 'IN_MEMORY_MAX_SIZE', input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
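# Added sketch (left as a comment because it mutates the global config):
#
#   datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20
#   assert is_small_dataset(400 * 2**20)
#   assert not is_small_dataset(600 * 2**20)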
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
UpperCamelCase__ : Optional[int] = r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
    title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
    doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
        Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
        [`RagRetriever`].
    dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `\"train\"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `\"compressed\"`):
            The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and
            `\"compressed\"`.
        index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with a distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
        output_retrieved (`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(lowerCamelCase_ )
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[Any] = "rag"
__a : Any = True
def __init__( self ,snake_case__=None ,snake_case__=True ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=" / " ,snake_case__=" // " ,snake_case__=5 ,snake_case__=300 ,snake_case__=768 ,snake_case__=8 ,snake_case__="wiki_dpr" ,snake_case__="train" ,snake_case__="compressed" ,snake_case__=None ,snake_case__=None ,snake_case__=False ,snake_case__=False ,snake_case__=0.0 ,snake_case__=True ,snake_case__=False ,snake_case__=False ,snake_case__=False ,snake_case__=True ,snake_case__=None ,**snake_case__ ,):
super().__init__(
bos_token_id=snake_case__ ,pad_token_id=snake_case__ ,eos_token_id=snake_case__ ,decoder_start_token_id=snake_case__ ,forced_eos_token_id=snake_case__ ,is_encoder_decoder=snake_case__ ,prefix=snake_case__ ,vocab_size=snake_case__ ,**snake_case__ ,)
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('question_encoder' )
SCREAMING_SNAKE_CASE_ : Tuple = question_encoder_config.pop('model_type' )
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop('generator' )
SCREAMING_SNAKE_CASE_ : Any = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoConfig.for_model(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : int = AutoConfig.for_model(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = reduce_loss
SCREAMING_SNAKE_CASE_ : Dict = label_smoothing
SCREAMING_SNAKE_CASE_ : Dict = exclude_bos_score
SCREAMING_SNAKE_CASE_ : Tuple = do_marginalize
SCREAMING_SNAKE_CASE_ : Union[str, Any] = title_sep
SCREAMING_SNAKE_CASE_ : Any = doc_sep
SCREAMING_SNAKE_CASE_ : Union[str, Any] = n_docs
SCREAMING_SNAKE_CASE_ : int = max_combined_length
SCREAMING_SNAKE_CASE_ : Optional[int] = dataset
SCREAMING_SNAKE_CASE_ : str = dataset_split
SCREAMING_SNAKE_CASE_ : Union[str, Any] = index_name
SCREAMING_SNAKE_CASE_ : Any = retrieval_vector_size
SCREAMING_SNAKE_CASE_ : str = retrieval_batch_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = passages_path
SCREAMING_SNAKE_CASE_ : Dict = index_path
SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_dummy_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = output_retrieved
SCREAMING_SNAKE_CASE_ : Tuple = do_deduplication
SCREAMING_SNAKE_CASE_ : List[str] = use_cache
if self.forced_eos_token_id is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.generator ,'forced_eos_token_id' ,snake_case__ )
@classmethod
def snake_case ( cls ,snake_case__ ,snake_case__ ,**snake_case__ ):
return cls(question_encoder=question_encoder_config.to_dict() ,generator=generator_config.to_dict() ,**snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.question_encoder.to_dict()
SCREAMING_SNAKE_CASE_ : Tuple = self.generator.to_dict()
SCREAMING_SNAKE_CASE_ : int = self.__class__.model_type
return output
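# A minimal sketch of composing this config from two sub-configs via the
# classmethod above (the model identifiers below are illustrative
# assumptions, not part of the original file):
if __name__ == "__main__":
    from transformers import AutoConfig, RagConfig

    question_encoder_config = AutoConfig.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
    generator_config = AutoConfig.from_pretrained('facebook/bart-large')
    rag_config = RagConfig.from_question_encoder_generator_configs(
        question_encoder_config, generator_config, n_docs=5
    )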
| 706 |
from math import log
from scipy.constants import Boltzmann, physical_constants
UpperCamelCase__ : Any = 3_00 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
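    # A worked example with assumed silicon-like values at T = 300 K (the
    # concentrations below, in cm^-3, are illustrative, not from this file).
    # The units cancel in the ratio, so only the ratio matters:
    n_d, n_a, n_i = 1E17, 1E17, 1.5E10
    v_bi = Boltzmann * T * log(n_d * n_a / n_i**2) / physical_constants['electron volt'][0]
    print(F"""built-in potential = {v_bi:.2f} V""")  # about 0.81 V for these values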
| 685 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( _UpperCamelCase , unittest.TestCase ):
__a : str = DiTPipeline
__a : List[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__a : Dict = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
__a : Any = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__a : str = False
def snake_case ( self ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : int = TransformeraDModel(
sample_size=16 ,num_layers=2 ,patch_size=4 ,attention_head_dim=8 ,num_attention_heads=2 ,in_channels=4 ,out_channels=8 ,attention_bias=__a ,activation_fn='gelu-approximate' ,num_embeds_ada_norm=1000 ,norm_type='ada_norm_zero' ,norm_elementwise_affine=__a ,)
SCREAMING_SNAKE_CASE_ : Any = AutoencoderKL()
SCREAMING_SNAKE_CASE_ : Dict = DDIMScheduler()
SCREAMING_SNAKE_CASE_ : str = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def snake_case ( self ,snake_case__ ,snake_case__=0 ):
if str(__a ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(__a )
else:
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=__a ).manual_seed(__a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = "cpu"
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : str = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(__a )
SCREAMING_SNAKE_CASE_ : str = pipe(**__a ).images
SCREAMING_SNAKE_CASE_ : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 16, 16, 3) )
SCREAMING_SNAKE_CASE_ : Any = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a ,1E-3 )
def snake_case ( self ):
self._test_inference_batch_single_identical(relax_max_difference=__a ,expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def snake_case ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
SCREAMING_SNAKE_CASE_ : Dict = ["vase", "umbrella", "white shark", "white wolf"]
SCREAMING_SNAKE_CASE_ : Any = pipe.get_label_ids(__a )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(__a ,generator=__a ,num_inference_steps=40 ,output_type='np' ).images
for word, image in zip(__a ,__a ):
SCREAMING_SNAKE_CASE_ : List[str] = load_numpy(
F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1E-2
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
SCREAMING_SNAKE_CASE_ : Tuple = ["vase", "umbrella"]
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe.get_label_ids(__a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = pipe(__a ,generator=__a ,num_inference_steps=25 ,output_type='np' ).images
for word, image in zip(__a ,__a ):
SCREAMING_SNAKE_CASE_ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
F'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1E-1
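# A minimal end-to-end sketch of the pipeline exercised above; the checkpoint
# id and label follow the slow test, and running it requires a CUDA device
# plus a large download (illustrative, not part of the test suite):
#
#   pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256').to('cuda')
#   class_ids = pipe.get_label_ids(['white shark'])
#   image = pipe(class_ids, generator=torch.manual_seed(0), num_inference_steps=25).images[0]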
| 707 |
class lowerCAmelCase_ ( lowerCamelCase_ ):
pass
class lowerCAmelCase_ ( lowerCamelCase_ ):
pass
class lowerCAmelCase_ :
def __init__( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[],
[],
[],
]
def snake_case ( self ,snake_case__ ,snake_case__ ):
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError('Maximum queue size is 100' )
self.queues[priority].append(snake_case__ )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def snake_case ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self ):
return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class lowerCAmelCase_ :
def __init__( self ):
SCREAMING_SNAKE_CASE_ : List[str] = []
def snake_case ( self ,snake_case__ ):
if len(self.queue ) == 100:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(snake_case__ )
def snake_case ( self ):
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = min(self.queue )
self.queue.remove(snake_case__ )
return data
def __str__( self ):
return str(self.queue )
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
print(lowerCamelCase_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowerCamelCase_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(lowerCamelCase_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowerCamelCase_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 0 |
from __future__ import annotations
import time
UpperCamelCase__ : List[Any] = list[tuple[int, int]]
UpperCamelCase__ : Optional[int] = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0's are free paths whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCamelCase__ : List[Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = pos_x
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pos_y
SCREAMING_SNAKE_CASE_ : Tuple = (pos_y, pos_x)
SCREAMING_SNAKE_CASE_ : Optional[int] = goal_x
SCREAMING_SNAKE_CASE_ : Optional[Any] = goal_y
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,_snake_case )
SCREAMING_SNAKE_CASE_ : Tuple = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,_snake_case )
SCREAMING_SNAKE_CASE_ : Tuple = [self.start]
SCREAMING_SNAKE_CASE_ : List[Any] = False
def snake_case ( self ):
while self.node_queue:
SCREAMING_SNAKE_CASE_ : str = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
SCREAMING_SNAKE_CASE_ : List[str] = True
return self.retrace_path(_snake_case )
SCREAMING_SNAKE_CASE_ : Any = self.get_successors(_snake_case )
for node in successors:
self.node_queue.append(_snake_case )
if not self.reached:
return [self.start.pos]
return None
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for action in delta:
SCREAMING_SNAKE_CASE_ : Optional[Any] = parent.pos_x + action[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_snake_case ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(_snake_case ,_snake_case ,self.target.pos_y ,self.target.pos_x ,_snake_case ) )
return successors
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = node
SCREAMING_SNAKE_CASE_ : str = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
SCREAMING_SNAKE_CASE_ : Dict = current_node.parent
path.reverse()
return path
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = BreadthFirstSearch(_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE_ : Optional[Any] = BreadthFirstSearch(_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE_ : Any = False
def snake_case ( self ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
SCREAMING_SNAKE_CASE_ : List[str] = self.fwd_bfs.node_queue.pop(0 )
SCREAMING_SNAKE_CASE_ : List[str] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
SCREAMING_SNAKE_CASE_ : List[str] = True
return self.retrace_bidirectional_path(
_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE_ : Optional[int] = current_bwd_node
SCREAMING_SNAKE_CASE_ : Any = current_fwd_node
SCREAMING_SNAKE_CASE_ : Tuple = {
self.fwd_bfs: self.fwd_bfs.get_successors(_snake_case ),
self.bwd_bfs: self.bwd_bfs.get_successors(_snake_case ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(_snake_case )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = self.fwd_bfs.retrace_path(_snake_case )
SCREAMING_SNAKE_CASE_ : int = self.bwd_bfs.retrace_path(_snake_case )
bwd_path.pop()
bwd_path.reverse()
SCREAMING_SNAKE_CASE_ : int = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
UpperCamelCase__ : Any = (0, 0)
UpperCamelCase__ : str = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCamelCase__ : List[Any] = time.time()
UpperCamelCase__ : Optional[Any] = BreadthFirstSearch(init, goal)
UpperCamelCase__ : Dict = bfs.search()
UpperCamelCase__ : int = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
UpperCamelCase__ : List[Any] = time.time()
UpperCamelCase__ : str = BidirectionalBreadthFirstSearch(init, goal)
UpperCamelCase__ : Optional[int] = bd_bfs.search()
UpperCamelCase__ : List[str] = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 708 |
def __UpperCAmelCase ( lowerCamelCase_ : int = 10_00 ) -> int:
"""simple docstring"""
return sum(e for e in range(3 , lowerCamelCase_ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 685 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ : Optional[Any] = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
UpperCamelCase__ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 709 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
| 685 | 0 |
from ... import PretrainedConfig
UpperCamelCase__ : Any = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class lowerCAmelCase_ ( UpperCamelCase__ ):
__a : int = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
__a : Dict = "nezha"
def __init__( self ,snake_case__=21128 ,snake_case__=768 ,snake_case__=12 ,snake_case__=12 ,snake_case__=3072 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=64 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=1E-12 ,snake_case__=0.1 ,snake_case__=0 ,snake_case__=2 ,snake_case__=3 ,snake_case__=True ,**snake_case__ ,):
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
SCREAMING_SNAKE_CASE_ : str = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : str = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = hidden_act
SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : int = max_position_embeddings
SCREAMING_SNAKE_CASE_ : List[str] = max_relative_position
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Optional[int] = classifier_dropout
SCREAMING_SNAKE_CASE_ : Any = use_cache
| 710 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCamelCase__ : Union[str, Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Any = []
UpperCamelCase__ : Optional[Any] = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
UpperCamelCase__ : Any = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'''emoji''': True,
},
}
]
UpperCamelCase__ : Union[str, Any] = 0
for log in Path().glob('''*.log'''):
UpperCamelCase__ : Optional[int] = 0
with open(log, '''r''') as f:
for line in f:
UpperCamelCase__ : Any = json.loads(line)
if line.get('''nodeid''', '''''') != "":
UpperCamelCase__ : Tuple = line['''nodeid''']
if line.get('''duration''', None) is not None:
UpperCamelCase__ : List[Any] = F"""{line["duration"]:.4f}"""
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCamelCase__ : Tuple = []
log.unlink()
UpperCamelCase__ : List[Any] = ''''''
UpperCamelCase__ : List[str] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Optional[int] = {}
for test in failed_tests:
UpperCamelCase__ : str = test[0].split('''::''')
UpperCamelCase__ : List[Any] = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
UpperCamelCase__ : int = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCamelCase__ : str = [test[0] for test in failed_table]
UpperCamelCase__ : Union[str, Any] = list(set(files))
# Count number of instances in failed_tests
UpperCamelCase__ : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCamelCase__ : str = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
UpperCamelCase__ : List[Any] = '''Too many failed tests, please see the full report in the Action results.'''
UpperCamelCase__ : Optional[Any] = len(err) + 10
UpperCamelCase__ : List[str] = message[: 30_00 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
UpperCamelCase__ : Optional[Any] = '''No failed tests! 🤗'''
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
UpperCamelCase__ : int = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCamelCase__ : Optional[Any] = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCamelCase__ : Tuple = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
UpperCamelCase__ : Any = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCamelCase__ : int = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCamelCase__ : str = row[0]
else:
UpperCamelCase__ : str = ''''''
UpperCamelCase__ : Optional[Any] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 685 | 0 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCamelCase__ : Any = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase__ : Any = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCamelCase__ : str = transformers.models.auto.configuration_auto.CONFIG_MAPPING
UpperCamelCase__ : Tuple = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
    # used during training (even though we don't have training scripts for these models yet)
    'Mask2FormerConfig': ['ignore_value'],
    # `ignore_value` used during training (even though we don't have training scripts for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F'config.{attribute}' in modeling_source
or F'getattr(config, \"{attribute}\"' in modeling_source
or F'getattr(self.config, \"{attribute}\"' in modeling_source
):
SCREAMING_SNAKE_CASE_ : int = True
# Deal with multi-line cases
elif (
re.search(
RF'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"' , __UpperCamelCase , )
is not None
):
SCREAMING_SNAKE_CASE_ : Any = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
SCREAMING_SNAKE_CASE_ : Any = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
SCREAMING_SNAKE_CASE_ : List[Any] = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
SCREAMING_SNAKE_CASE_ : List[str] = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
if not attribute_used:
SCREAMING_SNAKE_CASE_ : int = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
SCREAMING_SNAKE_CASE_ : Any = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
SCREAMING_SNAKE_CASE_ : Dict = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
SCREAMING_SNAKE_CASE_ : Optional[int] = True
elif attribute.endswith('_token_id' ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
# configuration class specific cases
if not case_allowed:
SCREAMING_SNAKE_CASE_ : Tuple = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
SCREAMING_SNAKE_CASE_ : Dict = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def __UpperCAmelCase ( lowerCamelCase_ : Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = dict(inspect.signature(config_class.__init__ ).parameters )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
SCREAMING_SNAKE_CASE_ : int = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
if len(config_class.attribute_map ) > 0:
SCREAMING_SNAKE_CASE_ : List[Any] = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
SCREAMING_SNAKE_CASE_ : Optional[int] = inspect.getsourcefile(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ : int = os.path.dirname(__UpperCamelCase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [os.path.join(__UpperCamelCase , __UpperCamelCase ) for fn in os.listdir(__UpperCamelCase ) if fn.startswith('modeling_' )]
# Get the source code strings
SCREAMING_SNAKE_CASE_ : int = []
for path in modeling_paths:
if os.path.isfile(__UpperCamelCase ):
with open(__UpperCamelCase ) as fp:
modeling_sources.append(fp.read() )
SCREAMING_SNAKE_CASE_ : Any = []
for config_param, default_value in zip(__UpperCamelCase , __UpperCamelCase ):
# `attributes` here is all the variant names for `config_param`
SCREAMING_SNAKE_CASE_ : int = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
unused_attributes.append(attributes[0] )
return sorted(__UpperCamelCase )
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
SCREAMING_SNAKE_CASE_ : Tuple = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda lowerCamelCase_ : inspect.isclass(__UpperCamelCase )
and issubclass(__UpperCamelCase , __UpperCamelCase )
and inspect.getmodule(__UpperCamelCase ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
SCREAMING_SNAKE_CASE_ : List[str] = check_config_attributes_being_used(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = unused_attributes
if len(__UpperCamelCase ) > 0:
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F'{name}: {attributes}\n'
raise ValueError(__UpperCamelCase )
if __name__ == "__main__":
check_config_attributes()
| 711 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError('Input value must be an \'int\' type' )
SCREAMING_SNAKE_CASE_ : Tuple = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
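    # The loop above mirrors `int.bit_length`; a quick cross-check with the
    # built-in (illustrative, not part of the original file):
    for n in (1, 2, 17, 10_24):
        assert n.bit_length() == len(bin(n)) - 2  # e.g. 17 = 0b10001 -> 5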
| 685 | 0 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = parser.parse_args_into_dataclasses()[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = TensorFlowBenchmark(args=lowerCamelCase_ )
try:
SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
SCREAMING_SNAKE_CASE_ : Optional[int] = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
SCREAMING_SNAKE_CASE_ : List[str] = ' '.join(str(lowerCamelCase_ ).split(' ' )[:-1] )
SCREAMING_SNAKE_CASE_ : List[str] = ''
SCREAMING_SNAKE_CASE_ : Optional[Any] = eval(str(lowerCamelCase_ ).split(' ' )[-1] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
SCREAMING_SNAKE_CASE_ : Dict = full_error_msg + begin_error_msg + str(lowerCamelCase_ )
raise ValueError(lowerCamelCase_ )
benchmark.run()
if __name__ == "__main__":
main()
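# Typical invocation sketch (flag names follow `TensorFlowBenchmarkArguments`;
# the script name and model identifier are illustrative assumptions):
#
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128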
| 712 |
import qiskit
def __UpperCAmelCase ( lowerCamelCase_ : int = 2 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = qubits
# Using Aer's simulator
SCREAMING_SNAKE_CASE_ : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
# Creating a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE_ : str = qiskit.QuantumCircuit(lowerCamelCase_ , lowerCamelCase_ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , lowerCamelCase_ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , lowerCamelCase_ )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(lowerCamelCase_ ) ) , list(range(lowerCamelCase_ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
SCREAMING_SNAKE_CASE_ : Tuple = qiskit.execute(lowerCamelCase_ , lowerCamelCase_ , shots=10_00 )
return job.result().get_counts(lowerCamelCase_ )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 685 | 0 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__ = True ,snake_case__ = None ,snake_case__ = 32 ,snake_case__ = True ,snake_case__ = 1 / 255 ,snake_case__ = True ,snake_case__ = True ,snake_case__ = [0.48145466, 0.4578275, 0.40821073] ,snake_case__ = [0.26862954, 0.26130258, 0.27577711] ,snake_case__ = True ,snake_case__=7 ,snake_case__=30 ,snake_case__=400 ,snake_case__=3 ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : str = do_resize
SCREAMING_SNAKE_CASE_ : str = size if size is not None else {"shortest_edge": 288}
SCREAMING_SNAKE_CASE_ : Tuple = size_divisor
SCREAMING_SNAKE_CASE_ : Optional[int] = do_rescale
SCREAMING_SNAKE_CASE_ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE_ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE_ : List[str] = do_center_crop
SCREAMING_SNAKE_CASE_ : List[Any] = image_mean
SCREAMING_SNAKE_CASE_ : Optional[int] = image_std
SCREAMING_SNAKE_CASE_ : Any = do_pad
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : List[str] = min_resolution
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_resolution
def snake_case ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def snake_case ( self ,snake_case__ ,snake_case__=False ):
if not batched:
SCREAMING_SNAKE_CASE_ : int = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE_ : List[str] = image_inputs[0]
if isinstance(snake_case__ ,Image.Image ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = image.size
else:
SCREAMING_SNAKE_CASE_ : Dict = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE_ : Any = size / min(snake_case__ ,snake_case__ )
if h < w:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size, scale * w
else:
SCREAMING_SNAKE_CASE_ : Dict = scale * h, size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = int((1333 / 800) * size )
if max(snake_case__ ,snake_case__ ) > max_size:
SCREAMING_SNAKE_CASE_ : int = max_size / max(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = newh * scale
SCREAMING_SNAKE_CASE_ : List[Any] = neww * scale
SCREAMING_SNAKE_CASE_ : Optional[int] = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE_ : Dict = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE_ : Tuple = max(snake_case__ ,key=lambda snake_case__ : item[0] )[0]
SCREAMING_SNAKE_CASE_ : List[Any] = max(snake_case__ ,key=lambda snake_case__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase_ ( _A , unittest.TestCase ):
__a : Optional[Any] = BridgeTowerImageProcessor if is_vision_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = BridgeTowerImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'image_mean' ) )
self.assertTrue(hasattr(snake_case__ ,'image_std' ) )
self.assertTrue(hasattr(snake_case__ ,'do_normalize' ) )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
self.assertTrue(hasattr(snake_case__ ,'size_divisor' ) )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image processor
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processor_tester.get_expected_values(snake_case__ ,batched=snake_case__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def snake_case ( self ):
# Initialize image processor
SCREAMING_SNAKE_CASE_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : str = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ : int = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processor_tester.get_expected_values(snake_case__ ,batched=snake_case__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def snake_case ( self ):
# Initialize image processor
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : str = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processor_tester.get_expected_values(snake_case__ ,batched=snake_case__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
| 713 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> bool:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('check_bouncy() accepts only integer arguments' )
SCREAMING_SNAKE_CASE_ : Optional[int] = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = ''.join(sorted(lowerCamelCase_ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __UpperCAmelCase ( lowerCamelCase_ : float = 99 ) -> int:
"""simple docstring"""
if not 0 < percent < 1_00:
raise ValueError('solution() only accepts values from 0 to 100' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Dict = 1
while True:
if check_bouncy(lowerCamelCase_ ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 685 | 0 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCamelCase__ : int = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] ) -> Any:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
SCREAMING_SNAKE_CASE_ : Optional[Any] = k.replace(a__ , a__ )
return k
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = DEFAULTS.copy()
cfg_kwargs.update(a__ )
SCREAMING_SNAKE_CASE_ : List[str] = PegasusConfig(**a__ )
SCREAMING_SNAKE_CASE_ : List[Any] = PegasusForConditionalGeneration(a__ )
SCREAMING_SNAKE_CASE_ : int = torch_model.model.state_dict()
SCREAMING_SNAKE_CASE_ : Tuple = {}
for k, v in tf_weights.items():
SCREAMING_SNAKE_CASE_ : Optional[int] = rename_state_dict_key(a__ )
if new_k not in sd:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if "dense" in k or "proj" in new_k:
SCREAMING_SNAKE_CASE_ : Dict = v.T
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor(a__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
SCREAMING_SNAKE_CASE_ : List[Any] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
SCREAMING_SNAKE_CASE_ : int = mapping['shared.weight']
SCREAMING_SNAKE_CASE_ : List[str] = mapping['shared.weight']
SCREAMING_SNAKE_CASE_ : int = {k: torch.zeros_like(a__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**a__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = torch_model.model.load_state_dict(a__ , strict=a__ )
SCREAMING_SNAKE_CASE_ : Any = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def __UpperCAmelCase ( lowerCamelCase_ : int="./ckpt/aeslc/model.ckpt-32000" ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tf.train.list_variables(a__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['Adafactor', 'global_step']
for name, shape in tqdm(a__ , desc='converting tf checkpoint to dict' ):
SCREAMING_SNAKE_CASE_ : List[Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.train.load_variable(a__ , a__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = array
return tf_weights
def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(a__ ).parent.name
SCREAMING_SNAKE_CASE_ : Any = task_specific_params[F'summarization_{dataset}']['max_position_embeddings']
SCREAMING_SNAKE_CASE_ : List[str] = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=a__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(a__ )
# convert model
SCREAMING_SNAKE_CASE_ : List[str] = get_tf_weights_as_numpy(a__ )
SCREAMING_SNAKE_CASE_ : str = task_specific_params[F'summarization_{dataset}']
if dataset == "large":
SCREAMING_SNAKE_CASE_ : Any = task_specific_params
SCREAMING_SNAKE_CASE_ : Union[str, Any] = convert_pegasus(a__ , a__ )
torch_model.save_pretrained(a__ )
SCREAMING_SNAKE_CASE_ : List[Any] = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(a__ , Path(a__ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
UpperCamelCase__ : List[str] = parser.parse_args()
if args.save_dir is None:
UpperCamelCase__ : Union[str, Any] = Path(args.tf_ckpt_path).parent.name
UpperCamelCase__ : List[str] = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 714 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Dict = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Any = ['''ChineseCLIPFeatureExtractor''']
UpperCamelCase__ : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
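# With the lazy module in place, the heavy submodules above are only imported on first
# attribute access, so downstream code can simply do, e.g. (a sketch, assuming torch and
# the vision extras are installed):
#     from transformers import ChineseCLIPModel, ChineseCLIPProcessor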
| 685 | 0 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
UpperCamelCase__ : List[str] = namedtuple('''covid_data''', '''cases deaths recovered''')
def covid_stats( url: str = "https://www.worldometers.info/coronavirus/" ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ : Dict = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(SCREAMING_SNAKE_CASE_ ) )
fmt = '''Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'''
print(fmt.format(*covid_stats()))
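# The xpath above grabs the three "maincounter" spans, so the call returns the counters as
# raw strings, e.g. covid_data(cases='...', deaths='...', recovered='...') (values elided).
# This is a best-effort scraper: it depends on worldometers' current markup and may break
# whenever the page layout changes.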
| 715 |
def actual_power( a : int , b : int ) -> int:
    """simple docstring"""
    if b == 0:
        return 1
    # int(b / 2) truncates toward zero, so the recursion also terminates for negative b
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power( a : int , b : int ) -> float:
    """simple docstring"""
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
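    # A quick worked check of the squaring recursion above (runs under the same guard):
    #   actual_power(2, 10) = actual_power(2, 5) ** 2 = (2 * actual_power(2, 2) ** 2) ** 2 = 1024
    #   power(-2, -3) = 1 / actual_power(-2, -3) = 1 / (-8) = -0.125
    assert power(2, 10) == 1024
    assert power(-2, -3) == -0.125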
| 685 | 0 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__="resnet50" ,snake_case__=3 ,snake_case__=32 ,snake_case__=3 ,snake_case__=True ,snake_case__=True ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = parent
SCREAMING_SNAKE_CASE_ : Tuple = out_indices if out_indices is not None else [4]
SCREAMING_SNAKE_CASE_ : Optional[Any] = stage_names
SCREAMING_SNAKE_CASE_ : Any = out_features
SCREAMING_SNAKE_CASE_ : Dict = backbone
SCREAMING_SNAKE_CASE_ : Tuple = batch_size
SCREAMING_SNAKE_CASE_ : List[Any] = image_size
SCREAMING_SNAKE_CASE_ : str = num_channels
SCREAMING_SNAKE_CASE_ : int = use_pretrained_backbone
SCREAMING_SNAKE_CASE_ : List[str] = is_training
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_config()
return config, pixel_values
def snake_case ( self ):
return TimmBackboneConfig(
image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = TimmBackbone(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[int] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = config_and_inputs
SCREAMING_SNAKE_CASE_ : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowerCAmelCase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__a : Optional[int] = (TimmBackbone,) if is_torch_available() else ()
__a : Optional[int] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
__a : Optional[int] = False
__a : List[str] = False
__a : str = False
__a : str = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = TimmBackboneModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self ,config_class=__SCREAMING_SNAKE_CASE ,has_text_modality=__SCREAMING_SNAKE_CASE )
def snake_case ( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = 'resnet18'
SCREAMING_SNAKE_CASE_ : Dict = 'microsoft/resnet-18'
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE ,use_timm_backbone=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
SCREAMING_SNAKE_CASE_ : Tuple = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE ,use_timm_backbone=__SCREAMING_SNAKE_CASE ,out_indices=[1, 2, 3] )
SCREAMING_SNAKE_CASE_ : int = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def snake_case ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def snake_case ( self ):
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def snake_case ( self ):
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def snake_case ( self ):
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def snake_case ( self ):
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def snake_case ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def snake_case ( self ):
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def snake_case ( self ):
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def snake_case ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def snake_case ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def snake_case ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def snake_case ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def snake_case ( self ):
pass
@unittest.skip('Safetensors is not supported by timm.' )
def snake_case ( self ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case ( self ):
pass
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : Dict = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__SCREAMING_SNAKE_CASE )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : str = self.has_attentions
# no need to test all models as different heads yield the same functionality
SCREAMING_SNAKE_CASE_ : Tuple = self.all_model_classes[0]
SCREAMING_SNAKE_CASE_ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = self._prepare_for_class(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = model(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = outputs[0][-1]
# Encoder-/Decoder-only models
SCREAMING_SNAKE_CASE_ : Tuple = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
SCREAMING_SNAKE_CASE_ : str = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(**__SCREAMING_SNAKE_CASE )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
SCREAMING_SNAKE_CASE_ : str = copy.deepcopy(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(**__SCREAMING_SNAKE_CASE )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
SCREAMING_SNAKE_CASE_ : Dict = copy.deepcopy(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : str = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(**__SCREAMING_SNAKE_CASE )
| 716 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=7 ,snake_case__=True ,snake_case__=True ,snake_case__=False ,snake_case__=True ,snake_case__=99 ,snake_case__=32 ,snake_case__=5 ,snake_case__=4 ,snake_case__=37 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=16 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=3 ,snake_case__=4 ,snake_case__=None ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : List[Any] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = seq_length
SCREAMING_SNAKE_CASE_ : Tuple = is_training
SCREAMING_SNAKE_CASE_ : List[str] = use_input_mask
SCREAMING_SNAKE_CASE_ : List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = intermediate_size
SCREAMING_SNAKE_CASE_ : Dict = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : int = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[str] = num_choices
SCREAMING_SNAKE_CASE_ : Tuple = scope
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=snake_case__ ,initializer_range=self.initializer_range ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ ,attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ : List[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,use_cache=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids with them
SCREAMING_SNAKE_CASE_ : str = ids_tensor((self.batch_size, 3) ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append the new tokens to input_ids and extend the attention mask
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Dict = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,past_key_values=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_config_and_inputs()
        ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__a : int = (LlamaForCausalLM,) if is_torch_available() else ()
__a : Any = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Tuple = False
__a : Tuple = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self ,config_class=snake_case__ ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : Optional[int] = type
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Dict = 3
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : str = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Any = 3
SCREAMING_SNAKE_CASE_ : int = 'single_label_classification'
SCREAMING_SNAKE_CASE_ : str = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Dict = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Tuple = 3
SCREAMING_SNAKE_CASE_ : str = 'multi_label_classification'
SCREAMING_SNAKE_CASE_ : int = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Tuple = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def snake_case ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([1, 10] ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE_ : int = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : List[Any] = original_model(snake_case__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : List[Any] = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE_ : int = LlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE_ : str = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : Optional[int] = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : List[str] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Dict = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : str = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : int = model(torch.tensor(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
SCREAMING_SNAKE_CASE_ : List[str] = 'Simply put, the theory of relativity states that '
SCREAMING_SNAKE_CASE_ : str = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(snake_case__ ,return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=snake_case__ )
# greedy generation outputs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.generate(snake_case__ ,max_new_tokens=64 ,top_p=snake_case__ ,temperature=1 ,do_sample=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(generated_ids[0] ,skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ ,snake_case__ )
| 685 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCAmelCase_ ( UpperCamelCase_ ):
__a : Tuple = ["pixel_values"]
def __init__( self ,snake_case__ = True ,snake_case__ = None ,snake_case__ = PILImageResampling.BICUBIC ,snake_case__ = True ,snake_case__ = None ,snake_case__ = True ,snake_case__ = 1 / 255 ,snake_case__ = True ,snake_case__ = None ,snake_case__ = None ,snake_case__ = True ,**snake_case__ ,):
super().__init__(**__a )
SCREAMING_SNAKE_CASE_ : Dict = size if size is not None else {'shortest_edge': 224}
SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(__a ,default_to_square=__a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(__a ,default_to_square=__a ,param_name='crop_size' )
SCREAMING_SNAKE_CASE_ : List[str] = do_resize
SCREAMING_SNAKE_CASE_ : Dict = size
SCREAMING_SNAKE_CASE_ : int = resample
SCREAMING_SNAKE_CASE_ : List[Any] = do_center_crop
SCREAMING_SNAKE_CASE_ : Optional[int] = crop_size
SCREAMING_SNAKE_CASE_ : Any = do_rescale
SCREAMING_SNAKE_CASE_ : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE_ : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE_ : Optional[int] = do_convert_rgb
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ = PILImageResampling.BICUBIC ,snake_case__ = None ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(__a ,default_to_square=__a )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
SCREAMING_SNAKE_CASE_ : int = get_resize_output_image_size(__a ,size=size['shortest_edge'] ,default_to_square=__a )
return resize(__a ,size=__a ,resample=__a ,data_format=__a ,**__a )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ = None ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(__a ,size=(size['height'], size['width']) ,data_format=__a ,**__a )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ = None ,**snake_case__ ,):
return rescale(__a ,scale=__a ,data_format=__a ,**__a )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,**snake_case__ ,):
return normalize(__a ,mean=__a ,std=__a ,data_format=__a ,**__a )
def snake_case ( self ,snake_case__ ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = ChannelDimension.FIRST ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : List[str] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : int = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(__a ,param_name='size' ,default_to_square=__a )
SCREAMING_SNAKE_CASE_ : Any = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ : str = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ : int = get_size_dict(__a ,param_name='crop_size' ,default_to_square=__a )
SCREAMING_SNAKE_CASE_ : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : int = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE_ : List[Any] = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE_ : Dict = [convert_to_rgb(__a ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : List[str] = [to_numpy_array(__a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : Any = [self.resize(image=__a ,size=__a ,resample=__a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.center_crop(image=__a ,size=__a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ : Any = [self.rescale(image=__a ,scale=__a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.normalize(image=__a ,mean=__a ,std=__a ) for image in images]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [to_channel_dimension_format(__a ,__a ) for image in images]
SCREAMING_SNAKE_CASE_ : Tuple = {'pixel_values': images}
return BatchFeature(data=__a ,tensor_type=__a )
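# A minimal preprocessing sketch (the public class name CLIPImageProcessor is an assumption
# based on the OPENAI_CLIP_MEAN/STD defaults above; `img` is a PIL.Image.Image):
#     processor = CLIPImageProcessor()
#     batch = processor(images=img, return_tensors="pt")
#     batch["pixel_values"].shape  # -> (1, 3, 224, 224) with the default crop_size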
| 717 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
UpperCamelCase__ : int = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
UpperCamelCase__ : str = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Tuple ) -> List[str]:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Dict = collections.OrderedDict()
SCREAMING_SNAKE_CASE_ : Dict = collections.OrderedDict()
SCREAMING_SNAKE_CASE_ : List[Any] = collections.OrderedDict()
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Any = f.readlines()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = b
SCREAMING_SNAKE_CASE_ : Dict = idx
for wd in b:
SCREAMING_SNAKE_CASE_ : Any = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : List[str] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__="<|endoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__="<|startoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__=False ,**snake_case__ ,):
super().__init__(
unk_token=snake_case__ ,pad_token=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,do_clean_text=snake_case__ ,**snake_case__ ,)
if not os.path.isfile(snake_case__ ):
raise ValueError(
F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(snake_case__ ):
raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
SCREAMING_SNAKE_CASE_ : str = do_clean_text
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = load_vocab_and_emoji(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = SubWordJapaneseTokenizer(
vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
@property
def snake_case ( self ):
        # self.vocab covers the character-variant fluctuations unique to Japanese, so the vocabulary is large
return len(self.raw_vocab )
def snake_case ( self ):
return dict(self.raw_vocab ,**self.added_tokens_encoder )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.tokenize(snake_case__ ,clean=self.do_clean_text )
def snake_case ( self ,snake_case__ ):
return self.vocab.get(snake_case__ ,self.vocab.get(self.unk_token ) )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.convert_id_to_token(snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = ''.join(snake_case__ ).strip()
return out_string
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case__ ,add_special_tokens=snake_case__ ) + [self.eos_token_id] )
if len(snake_case__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE_ : List[Any] = input_ids[-self.model_max_length :]
return input_ids
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
if os.path.isdir(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
SCREAMING_SNAKE_CASE_ : Tuple = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
SCREAMING_SNAKE_CASE_ : str = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
SCREAMING_SNAKE_CASE_ : Dict = token_index
writer.write(','.join(snake_case__ ) + '\n' )
index += 1
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
json.dump(self.emoji ,snake_case__ )
return vocab_file, emoji_file
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = vocab # same as swe
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_to_tokens # same as bpe
SCREAMING_SNAKE_CASE_ : Dict = emoji
SCREAMING_SNAKE_CASE_ : int = np.max([len(snake_case__ ) for w in self.vocab.keys()] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
SCREAMING_SNAKE_CASE_ : str = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
SCREAMING_SNAKE_CASE_ : str = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
SCREAMING_SNAKE_CASE_ : str = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
SCREAMING_SNAKE_CASE_ : int = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
SCREAMING_SNAKE_CASE_ : Tuple = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ):
return len(self.ids_to_tokens )
def snake_case ( self ,snake_case__ ):
        SCREAMING_SNAKE_CASE_ : Tuple = self.content_repatter1.sub('<URL>' ,snake_case__ )
        SCREAMING_SNAKE_CASE_ : Dict = self.content_repatter2.sub('<EMAIL>' ,snake_case__ )
        SCREAMING_SNAKE_CASE_ : Tuple = self.content_repatter3.sub('<TEL>' ,snake_case__ )
        SCREAMING_SNAKE_CASE_ : Any = self.content_repatter4.sub('<DATE>' ,snake_case__ )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.content_repatter5.sub('<DATE>' ,snake_case__ )
        SCREAMING_SNAKE_CASE_ : Tuple = self.content_repatter6.sub('<PRICE>' ,snake_case__ )
        SCREAMING_SNAKE_CASE_ : str = content.translate(self.content_trans1 )
while "<BLOCK><BLOCK>" in content:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = content.replace('<BLOCK><BLOCK>' ,'<BLOCK>' )
return content
def snake_case ( self ,snake_case__ ,snake_case__=False ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace(' ' ,'<SP>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace(' ' ,'<SP>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace('\r\n' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace('\n' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace('\r' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : List[str] = text.replace('\t' ,'<TAB>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace('—' ,'ー' )
SCREAMING_SNAKE_CASE_ : Optional[int] = text.replace('−' ,'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
                SCREAMING_SNAKE_CASE_ : int = text.replace(k ,v )
if clean:
SCREAMING_SNAKE_CASE_ : str = self.clean_text(snake_case__ )
def check_simbol(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = x.encode()
if len(snake_case__ ) == 1 and len(snake_case__ ) == 2:
SCREAMING_SNAKE_CASE_ : str = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2A1 and c <= 0XC2BF)
or (c >= 0XC780 and c <= 0XC783)
or (c >= 0XCAB9 and c <= 0XCBBF)
or (c >= 0XCC80 and c <= 0XCDA2)
):
return True
return False
def checkuae(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = x.encode()
if len(snake_case__ ) == 1 and len(snake_case__ ) == 3:
SCREAMING_SNAKE_CASE_ : Dict = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE2_8080 and c <= 0XE2_B07F:
return True
return False
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : List[Any] = []
while pos < len(snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = min(len(snake_case__ ) ,pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
SCREAMING_SNAKE_CASE_ : List[Any] = [] # (token_id, token, pos)
for e in range(snake_case__ ,snake_case__ ,-1 ):
SCREAMING_SNAKE_CASE_ : str = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(snake_case__ ) > 2:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(snake_case__ ) > 0:
# the smallest token_id is adopted
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = sorted(candidates ,key=lambda x : x[0] )[0]
result.append(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = e
else:
SCREAMING_SNAKE_CASE_ : Any = pos + 1
SCREAMING_SNAKE_CASE_ : Optional[int] = text[pos:end]
if check_simbol(snake_case__ ):
result.append('<KIGOU>' )
elif checkuae(snake_case__ ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
SCREAMING_SNAKE_CASE_ : int = end
return result
def snake_case ( self ,snake_case__ ,snake_case__="\n" ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : Dict = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(snake_case__ ) > 0:
words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : Dict = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(snake_case__ )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(snake_case__ )
if len(snake_case__ ) > 0:
words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : int = ''.join(snake_case__ )
return text
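# A usage sketch (checkpoint id taken from PRETRAINED_VOCAB_FILES_MAP above; the public
# class name GPTNeoXJapaneseTokenizer is an assumption based on the error messages):
#     tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#     ids = tokenizer("こんにちは、世界")["input_ids"]
#     text = tokenizer.decode(ids)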
| 685 | 0 |
import argparse
import os
import re
import packaging.version
UpperCamelCase__ : Tuple = '''examples/'''
UpperCamelCase__ : Union[str, Any] = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
UpperCamelCase__ : Union[str, Any] = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
UpperCamelCase__ : Tuple = '''README.md'''
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : Any ) -> Tuple:
"""simple docstring"""
with open(_lowercase , 'r' , encoding='utf-8' , newline='\n' ) as f:
SCREAMING_SNAKE_CASE_ : List[str] = f.read()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = REPLACE_PATTERNS[pattern]
SCREAMING_SNAKE_CASE_ : Tuple = replace.replace('VERSION' , _lowercase )
SCREAMING_SNAKE_CASE_ : Dict = re_pattern.sub(_lowercase , _lowercase )
with open(_lowercase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(_lowercase )
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(_lowercase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(_lowercase , _lowercase ) , _lowercase , pattern='examples' )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any]=False ) -> Tuple:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowercase , _lowercase , _lowercase )
if not patch:
update_version_in_examples(_lowercase )
def __UpperCAmelCase ( ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = '🤗 Transformers currently provides the following architectures'
SCREAMING_SNAKE_CASE_ : List[Any] = '1. Want to contribute a new model?'
with open(_lowercase , 'r' , encoding='utf-8' , newline='\n' ) as f:
SCREAMING_SNAKE_CASE_ : List[str] = f.readlines()
# Find the start of the list.
SCREAMING_SNAKE_CASE_ : Any = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE_ : str = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
SCREAMING_SNAKE_CASE_ : Dict = lines[index].replace(
'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
index += 1
with open(_lowercase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(_lowercase )
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
with open(REPLACE_FILES['init'] , 'r' ) as f:
SCREAMING_SNAKE_CASE_ : List[str] = f.read()
SCREAMING_SNAKE_CASE_ : Any = REPLACE_PATTERNS['init'][0].search(_lowercase ).groups()[0]
return packaging.version.parse(_lowercase )
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int]=False ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE_ : List[Any] = default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE_ : List[str] = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
SCREAMING_SNAKE_CASE_ : List[Any] = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE_ : Tuple = input(F'Which version are you releasing? [{default_version}]' )
if len(_lowercase ) == 0:
SCREAMING_SNAKE_CASE_ : List[str] = default_version
print(F'Updating version to {version}.' )
global_version_update(_lowercase , patch=_lowercase )
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = get_version()
SCREAMING_SNAKE_CASE_ : Optional[int] = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
SCREAMING_SNAKE_CASE_ : Tuple = current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE_ : List[Any] = input(F'Which version are we developing now? [{dev_version}]' )
if len(_lowercase ) == 0:
SCREAMING_SNAKE_CASE_ : List[Any] = dev_version
print(F'Updating version to {version}.' )
global_version_update(_lowercase )
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCamelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
UpperCamelCase__ : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
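# Example invocations (a sketch; the utils/release.py path is an assumption):
#     python utils/release.py                 # minor release: bump versions everywhere
#     python utils/release.py --patch         # patch release: bump the micro version only
#     python utils/release.py --post_release  # afterwards: move back to a .dev0 version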
| 718 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : int=() , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Optional[int]="no" , lowerCamelCase_ : Optional[Any]="29500" ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
SCREAMING_SNAKE_CASE_ : str = True
elif "IPython" in sys.modules:
SCREAMING_SNAKE_CASE_ : Dict = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
SCREAMING_SNAKE_CASE_ : Optional[int] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            F'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , lowerCamelCase_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 8
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='TPU' )
print(F'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*lowerCamelCase_ )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=lowerCamelCase_ , master_addr='127.0.01' , master_port=lowerCamelCase_ , mixed_precision=lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='MULTI_GPU' )
print(F'Launching training on {num_processes} GPUs.' )
try:
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
SCREAMING_SNAKE_CASE_ : Optional[Any] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*lowerCamelCase_ )
def debug_launcher(function, args=(), num_processes=2):
    """simple docstring"""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes, master_addr='127.0.0.1', master_port='29500', accelerate_mixed_precision='no', accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu='yes', ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method='fork')
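# Usage sketch (illustrative): launching a hypothetical training function with the
# `notebook_launcher` defined above. `_train_fn` and its argument are placeholders,
# not part of the original module.
def _train_fn(learning_rate: float) -> None:
    # A real training function would build an `Accelerator` and run the loop here.
    print(f'training with learning_rate={learning_rate}')


if __name__ == "__main__":
    # num_processes=1 exercises the single-process CPU/GPU/MPS branch.
    notebook_launcher(_train_fn, args=(1e-3,), num_processes=1)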
| 685 | 0 |
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return F'\'{self.value}: {self.prior:.5}\''
        else:
            return pformat(
                {F'{self.value}: {self.prior:.5}': (self.left, self.right)}, indent=1)

    def __str__(self):
        value = str(self.value) + ' '
        left = str(self.left or '')
        right = str(self.right or '')
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """simple docstring"""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """simple docstring"""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """simple docstring"""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """simple docstring"""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    """simple docstring"""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=',')
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """simple docstring"""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print('Unknown command')
    return root


def main() -> None:
    """simple docstring"""
    root = None
    print(
        'enter numbers to create a tree, + value to add value into treap, '
        '- value to erase all nodes with value. \'q\' to quit. ')
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print('goodbye!')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
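# Quick demonstration (illustrative helper, not part of the original module) of
# the treap operations above.
def _treap_demo() -> None:
    root = None
    for key in (5, 3, 8, 1):
        root = insert(root, key)
    inorder(root)  # prints: 1,3,5,8,
    print()
    root = erase(root, 3)
    inorder(root)  # prints: 1,5,8,
    print()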
| 719 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 685 | 0 |
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    """simple docstring"""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search('ABCDEFG', 'DE') == [3]
    print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
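# Additional check (illustrative): overlapping occurrences are all reported,
# because the outer loop advances one position at a time.
assert naive_pattern_search('AAAA', 'AA') == [0, 1, 2]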
| 720 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Union[str, Any] = CLIPTokenizer
__a : List[str] = CLIPTokenizerFast
__a : List[str] = True
__a : Tuple = {}
__a : Tuple = False
def snake_case ( self ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE_ : Any = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE_ : Dict = 'xa\u0303y' + ' ' + 'x\xe3y'
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Tuple = F'{text_of_1_token} {text_of_1_token}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : str = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F' {text}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : int = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(snake_case__ ) + 1, 1 + len(snake_case__ ) + 1 + len(snake_case__ )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
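# Quick illustration (assumption: network access and the `transformers` test
# environment; the checkpoint name is the public CLIP tokenizer):
#
#     tok = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32')
#     tok.tokenize('lower newer')  # BPE pieces end in '</w>', as asserted above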
| 685 | 0 |
from ...processing_utils import ProcessorMixin
class lowerCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
__a : Tuple = 'SpeechT5FeatureExtractor'
__a : int = 'SpeechT5Tokenizer'
def __init__( self ,snake_case__ ,snake_case__ ):
super().__init__(_a ,_a )
def __call__( self ,*snake_case__ ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('audio' ,_a )
SCREAMING_SNAKE_CASE_ : str = kwargs.pop('text' ,_a )
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('text_target' ,_a )
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('audio_target' ,_a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('sampling_rate' ,_a )
if audio is not None and text is not None:
raise ValueError(
'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
if audio_target is not None and text_target is not None:
raise ValueError(
'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
if audio is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = self.feature_extractor(_a ,*_a ,sampling_rate=_a ,**_a )
elif text is not None:
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(_a ,**_a )
else:
SCREAMING_SNAKE_CASE_ : List[str] = None
if audio_target is not None:
SCREAMING_SNAKE_CASE_ : int = self.feature_extractor(audio_target=_a ,*_a ,sampling_rate=_a ,**_a )
SCREAMING_SNAKE_CASE_ : Optional[int] = targets['input_values']
elif text_target is not None:
SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer(_a ,**_a )
SCREAMING_SNAKE_CASE_ : int = targets['input_ids']
else:
SCREAMING_SNAKE_CASE_ : Dict = None
if inputs is None:
return targets
if targets is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = labels
SCREAMING_SNAKE_CASE_ : int = targets.get('attention_mask' )
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE_ : List[str] = decoder_attention_mask
return inputs
def snake_case ( self ,*snake_case__ ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('input_values' ,_a )
SCREAMING_SNAKE_CASE_ : int = kwargs.pop('input_ids' ,_a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('labels' ,_a )
if input_values is not None and input_ids is not None:
raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
if input_values is not None:
SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor.pad(_a ,*_a ,**_a )
elif input_ids is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = self.tokenizer.pad(_a ,**_a )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
if labels is not None:
if "input_ids" in labels or (isinstance(_a ,_a ) and "input_ids" in labels[0]):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer.pad(_a ,**_a )
SCREAMING_SNAKE_CASE_ : str = targets['input_ids']
else:
SCREAMING_SNAKE_CASE_ : Tuple = self.feature_extractor.feature_size
SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.num_mel_bins
SCREAMING_SNAKE_CASE_ : Tuple = self.feature_extractor.pad(_a ,*_a ,**_a )
SCREAMING_SNAKE_CASE_ : Optional[int] = feature_size_hack
SCREAMING_SNAKE_CASE_ : int = targets['input_values']
else:
SCREAMING_SNAKE_CASE_ : List[str] = None
if inputs is None:
return targets
if targets is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = labels
SCREAMING_SNAKE_CASE_ : Any = targets.get('attention_mask' )
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE_ : int = decoder_attention_mask
return inputs
def snake_case ( self ,*snake_case__ ,**snake_case__ ):
return self.tokenizer.batch_decode(*_a ,**_a )
def snake_case ( self ,*snake_case__ ,**snake_case__ ):
return self.tokenizer.decode(*_a ,**_a )
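# Usage sketch (assumption: the class above mirrors transformers'
# `SpeechT5Processor`, and the public TTS checkpoint is available):
#
#     from transformers import SpeechT5Processor
#     processor = SpeechT5Processor.from_pretrained('microsoft/speecht5_tts')
#     inputs = processor(text='Hello, world.', return_tensors='pt')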
| 721 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
SCREAMING_SNAKE_CASE_ : int = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE_ : Optional[Any] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
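# Example invocations of the entry point above (illustrative; `env` and
# `download` are subcommands registered in `main`):
#   transformers-cli env
#   transformers-cli download bert-base-uncased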
| 685 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase_ ( lowerCamelCase_ ):
@staticmethod
@abstractmethod
def snake_case ( snake_case__ ):
raise NotImplementedError()
@abstractmethod
def snake_case ( self ):
raise NotImplementedError()
| 700 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions() -> list:
    """simple docstring"""
    url = 'https://pypi.org/pypi/diffusers/json'
    releases = json.loads(request.urlopen(url).read())['releases'].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules() -> None:
    """simple docstring"""
    # This function has already been executed if HF_MODULES_CACHE is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]) -> None:
    """simple docstring"""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file: str) -> list:
    """simple docstring"""
    with open(module_file, 'r', encoding='utf-8') as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r'^\s*import\s+\.(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import', content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file: str) -> list:
    """simple docstring"""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [F'{f}.py' for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename: str) -> list:
    """simple docstring"""
    with open(filename, 'r', encoding='utf-8') as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r'^\s*import\s+(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import', content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split('.')[0] for imp in imports if not imp.startswith('.')]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            'This modeling file requires the following packages that were not found in your environment: '
            F'{", ".join(missing_packages)}. Run `pip install {" ".join(missing_packages)}`')
    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """simple docstring"""
    module_path = module_path.replace(os.path.sep, '.')
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """simple docstring"""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split('.')[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
                    F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
                    F' {loaded_module}.')
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """simple docstring"""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = 'local'
    elif pretrained_model_name_or_path.count('/') == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = 'v' + '.'.join(__version__.split('.')[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else 'main'
            logger.info(F'Defaulting to latest_version: {revision}.')
        elif revision in available_versions:
            revision = F'v{revision}'
        elif revision == "main":
            pass  # "main" is already a valid GitHub revision
        else:
            raise ValueError(
                F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
                F' {", ".join(available_versions + ["main"])}.')
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False, )
            submodule = 'git'
            module_file = pretrained_model_name_or_path + '.py'
        except EnvironmentError:
            logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.')
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join('local', '--'.join(pretrained_model_name_or_path.split('/')))
        except EnvironmentError:
            logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.')
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = F'{module_needed}.py'
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, F'{module_needed}.py', cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """simple docstring"""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace('.py', ''))
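# Usage sketch (illustrative): resolving a community pipeline class the way
# `DiffusionPipeline.from_pretrained(..., custom_pipeline=...)` does internally.
# The repo id and class name below are hypothetical placeholders.
#
#     pipeline_class = get_class_from_dynamic_module(
#         'some-user/some-repo', 'my_pipeline.py', class_name='MyPipeline'
#     )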
| 685 | 0 |
def is_arithmetic_series(series: list) -> bool:
    """simple docstring"""
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """simple docstring"""
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
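# Quick checks (illustrative) for the helpers above.
assert is_arithmetic_series([2, 4, 6]) is True
assert is_arithmetic_series([3, 6, 12, 24]) is False
assert arithmetic_mean([2, 4, 6]) == 4.0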
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
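# Usage sketch (illustrative): a VQA-sized configuration. The value 2048 is an
# assumption here, matching the Faster R-CNN region features commonly paired
# with the `uclanlp/visualbert-vqa` checkpoints.
#
#     config = VisualBertConfig(visual_embedding_dim=2048)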
| 685 | 0 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_00_00_00) -> int:
    """simple docstring"""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_00_00_00))
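# Worked check (illustrative): with denominators capped at 8, the fraction
# immediately to the left of 3/7 is 2/5, so the search returns numerator 2.
assert solution(numerator=3, denominator=7, limit=8) == 2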
| 702 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """simple docstring"""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F'The estimated value of pi is {pi_estimate}')
    print(F'The numpy value of pi is {pi}')
    print(F'The total error is {abs(pi - pi_estimate)}')


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """simple docstring"""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """simple docstring"""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************')
    print(F'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(F'Estimated value is {estimated_value}')
    print(F'Expected value is {expected_value}')
    print(F'Total error is {abs(estimated_value - expected_value)}')
    print('******************')


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """simple docstring"""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)
    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(F'Estimated value is {estimated_value}')
    print(F'Expected value is {pi}')
    print(F'Total error is {abs(estimated_value - pi)}')
    print('******************')
if __name__ == "__main__":
import doctest
doctest.testmod()
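# Example session (illustrative helper): exercising the estimators above with
# 10**5 samples each.
def _monte_carlo_demo() -> None:
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)
    pi_estimator_using_area_under_curve(100_000)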
| 685 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if not scores:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(F'Optimal value : {minimax(0, 0, True, scores, height)}')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
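# Worked example (illustrative): with the 8 scores used in `main` the maximizing
# root is guaranteed 65. For a 4-leaf tree [3, 5, 2, 9], the min layer yields
# (3, 2) and the maximizing root therefore returns 3.
assert minimax(0, 0, True, [3, 5, 2, 9], math.log(4, 2)) == 3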
| 703 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=7 ,snake_case__=3 ,snake_case__=18 ,snake_case__=30 ,snake_case__=400 ,snake_case__=True ,snake_case__=None ,snake_case__=True ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE_ : int = max_resolution
SCREAMING_SNAKE_CASE_ : Dict = do_resize
SCREAMING_SNAKE_CASE_ : Dict = size
SCREAMING_SNAKE_CASE_ : str = apply_ocr
def snake_case ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = LayoutLMvaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
self.assertTrue(hasattr(snake_case__ ,'apply_ocr' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,snake_case__ )
self.assertIsInstance(encoding.boxes ,snake_case__ )
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# with apply_OCR = True
SCREAMING_SNAKE_CASE_ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
SCREAMING_SNAKE_CASE_ : str = Image.open(ds[0]['file'] ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : Any = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
# with apply_OCR = False
SCREAMING_SNAKE_CASE_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
| 685 | 0 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, 'rb') as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
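# Usage sketch (assumption: this module backs the packaged `pandas` builder in
# `datasets`, which reads pickled DataFrames):
#
#     from datasets import load_dataset
#     ds = load_dataset('pandas', data_files={'train': 'frame.pkl'})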
| 704 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
snake_case__ ,'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(snake_case__ ) ,snake_case__ ,) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : Dict = cached_features_file + '.lock'
with FileLock(snake_case__ ):
if os.path.exists(snake_case__ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(snake_case__ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
SCREAMING_SNAKE_CASE_ : List[Any] = (
processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
)
logger.info('Training examples: %s' ,len(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
logger.info('Saving features into cached file %s' ,snake_case__ )
torch.save(self.features ,snake_case__ )
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ :
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 128 ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list
SCREAMING_SNAKE_CASE_ : int = processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(snake_case__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
SCREAMING_SNAKE_CASE_ : List[Any] = tf.data.Dataset.from_generator(
snake_case__ ,(
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
        return self.features[snake_case__]
def snake_case ( self ):
return self.label_list
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_train_set.txt' ) ) ,'train' )
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
def snake_case ( self ):
return ["contradiction", "entailment", "neutral"]
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i, line in enumerate(snake_case__ ):
if i == 0:
continue
SCREAMING_SNAKE_CASE_ : List[str] = '%s-%s' % (set_type, line[0])
SCREAMING_SNAKE_CASE_ : Dict = line[5]
SCREAMING_SNAKE_CASE_ : Dict = line[6]
SCREAMING_SNAKE_CASE_ : Tuple = line[7][2:] if line[7].startswith('ex' ) else line[7]
SCREAMING_SNAKE_CASE_ : Optional[int] = line[0]
examples.append(InputExample(guid=snake_case__ ,text_a=snake_case__ ,text_b=snake_case__ ,label=snake_case__ ,pairID=snake_case__ ) )
return examples
def __UpperCAmelCase ( lowerCamelCase_ : List[InputExample] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : PreTrainedTokenizer , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase_ )}
SCREAMING_SNAKE_CASE_ : Dict = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase_ ) , desc='convert examples to features' ):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d' % (ex_index) )
SCREAMING_SNAKE_CASE_ : Any = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' , truncation=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = label_map[example.label] if example.label in label_map else 0
SCREAMING_SNAKE_CASE_ : List[str] = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase_ , label=lowerCamelCase_ , pairID=lowerCamelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
UpperCamelCase__ : str = {
'''hans''': 3,
}
UpperCamelCase__ : Dict = {
'''hans''': HansProcessor,
}
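# --- Hedged usage sketch (not in the source; assumes network access and uses the
# names this file references internally, e.g. `hans_convert_examples_to_features`):
from transformers import AutoTokenizer as _DemoAutoTokenizer

_demo_tokenizer = _DemoAutoTokenizer.from_pretrained('bert-base-uncased')
_demo_example = InputExample(
    guid='train-0',
    text_a='The doctor saw the lawyer.',
    text_b='The lawyer saw the doctor.',
    label='entailment',
    pairID='0',
)
_demo_features = hans_convert_examples_to_features(
    [_demo_example], ['contradiction', 'entailment', 'neutral'], 128, _demo_tokenizer
)
# Expected: _demo_features[0].label == 1 ('entailment') and pairID stored as int 0.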
| 685 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
UpperCamelCase__ : Tuple = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
UpperCamelCase__ : List[Any] = TaTokenizerFast
UpperCamelCase__ : int = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : str = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Any = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
UpperCamelCase__ : str = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
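# --- Hedged usage note (not in the source): with `_LazyModule`, importing the
# package stays cheap and heavy submodules load on first attribute access.
from transformers import MT5Config as _DemoMTaConfig  # lightweight: config only

_demo_config = _DemoMTaConfig(d_model=64, num_layers=2)
print(_demo_config.d_model)  # 64; no torch/tf/flax import has been triggered yet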
| 705 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( monkeypatch , dataset_size , input_in_memory_max_size ) -> None:
"""simple docstring"""
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
SCREAMING_SNAKE_CASE_ : str = dataset_size < in_memory_max_size
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = is_small_dataset(lowerCamelCase_ )
assert result == expected
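# --- Hedged standalone example (not part of the test): `is_small_dataset` simply
# compares a byte size against datasets.config.IN_MEMORY_MAX_SIZE (0 disables it).
import datasets.config as _demo_config
from datasets.utils.info_utils import is_small_dataset as _demo_is_small

_demo_config.IN_MEMORY_MAX_SIZE = 5_00 * 2**20  # demo-only global override
print(_demo_is_small(4_00 * 2**20))  # True: below the cap
print(_demo_is_small(6_00 * 2**20))  # False: above the cap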
| 685 | 0 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
UpperCamelCase__ : Any = logging.get_logger(__name__)
# General docstring
UpperCamelCase__ : Union[str, Any] = '''PoolFormerConfig'''
# Base docstring
UpperCamelCase__ : List[Any] = '''sail/poolformer_s12'''
UpperCamelCase__ : Union[str, Any] = [1, 5_12, 7, 7]
# Image classification docstring
UpperCamelCase__ : Any = '''sail/poolformer_s12'''
UpperCamelCase__ : Union[str, Any] = '''tabby, tabby cat'''
UpperCamelCase__ : Tuple = [
'''sail/poolformer_s12''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : str = 0.0 , lowerCamelCase_ : Optional[Any] = False ) -> Union[str, Any]:
"""simple docstring"""
if drop_prob == 0.0 or not training:
return input
SCREAMING_SNAKE_CASE_ : Tuple = 1 - drop_prob
SCREAMING_SNAKE_CASE_ : int = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
SCREAMING_SNAKE_CASE_ : int = keep_prob + torch.rand(lowerCAmelCase__ , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
SCREAMING_SNAKE_CASE_ : Dict = input.div(lowerCAmelCase__ ) * random_tensor
return output
class lowerCAmelCase_ ( nn.Module ):
def __init__( self ,snake_case__ = None ):
super().__init__()
SCREAMING_SNAKE_CASE_ : List[str] = drop_prob
def snake_case ( self ,snake_case__ ):
return drop_path(snake_case__ ,self.drop_prob ,self.training )
def snake_case ( self ):
return "p={}".format(self.drop_prob )
class lowerCAmelCase_ ( nn.Module ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=None ):
super().__init__()
SCREAMING_SNAKE_CASE_ : int = patch_size if isinstance(snake_case__ ,collections.abc.Iterable ) else (patch_size, patch_size)
SCREAMING_SNAKE_CASE_ : List[str] = stride if isinstance(snake_case__ ,collections.abc.Iterable ) else (stride, stride)
SCREAMING_SNAKE_CASE_ : Any = padding if isinstance(snake_case__ ,collections.abc.Iterable ) else (padding, padding)
SCREAMING_SNAKE_CASE_ : int = nn.Convad(snake_case__ ,snake_case__ ,kernel_size=snake_case__ ,stride=snake_case__ ,padding=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = norm_layer(snake_case__ ) if norm_layer else nn.Identity()
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.projection(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.norm(snake_case__ )
return embeddings
class lowerCAmelCase_ ( nn.GroupNorm ):
def __init__( self ,snake_case__ ,**snake_case__ ):
super().__init__(1 ,snake_case__ ,**snake_case__ )
class lowerCAmelCase_ ( nn.Module ):
def __init__( self ,snake_case__ ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.AvgPoolad(snake_case__ ,stride=1 ,padding=pool_size // 2 ,count_include_pad=snake_case__ )
def snake_case ( self ,snake_case__ ):
return self.pool(snake_case__ ) - hidden_states
class lowerCAmelCase_ ( nn.Module ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.Convad(snake_case__ ,snake_case__ ,1 )
SCREAMING_SNAKE_CASE_ : Any = nn.Convad(snake_case__ ,snake_case__ ,1 )
SCREAMING_SNAKE_CASE_ : List[str] = PoolFormerDropPath(snake_case__ )
if isinstance(config.hidden_act ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = ACTaFN[config.hidden_act]
else:
SCREAMING_SNAKE_CASE_ : Tuple = config.hidden_act
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = self.conva(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.act_fn(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.drop(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.conva(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.drop(snake_case__ )
return hidden_states
class lowerCAmelCase_ ( nn.Module ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Dict = PoolFormerPooling(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = PoolFormerOutput(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = PoolFormerGroupNorm(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = PoolFormerGroupNorm(snake_case__ )
# Useful for training neural nets
SCREAMING_SNAKE_CASE_ : int = PoolFormerDropPath(snake_case__ ) if drop_path > 0.0 else nn.Identity()
SCREAMING_SNAKE_CASE_ : Dict = config.use_layer_scale
if config.use_layer_scale:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((snake_case__) ) ,requires_grad=snake_case__ )
SCREAMING_SNAKE_CASE_ : str = nn.Parameter(
config.layer_scale_init_value * torch.ones((snake_case__) ) ,requires_grad=snake_case__ )
def snake_case ( self ,snake_case__ ):
if self.use_layer_scale:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.pooling(self.before_norm(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_states + self.drop_path(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = ()
SCREAMING_SNAKE_CASE_ : Tuple = self.output(self.after_norm(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
SCREAMING_SNAKE_CASE_ : Any = hidden_states + self.drop_path(snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = (output,) + outputs
return outputs
else:
SCREAMING_SNAKE_CASE_ : List[Any] = self.drop_path(self.pooling(self.before_norm(snake_case__ ) ) )
# First residual connection
SCREAMING_SNAKE_CASE_ : int = pooling_output + hidden_states
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ()
# Second residual connection inside the PoolFormerOutput block
SCREAMING_SNAKE_CASE_ : Dict = self.drop_path(self.output(self.after_norm(snake_case__ ) ) )
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_states + layer_output
SCREAMING_SNAKE_CASE_ : Tuple = (output,) + outputs
return outputs
class lowerCAmelCase_ ( nn.Module ):
def __init__( self ,snake_case__ ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Optional[Any] = config
# stochastic depth decay rule
SCREAMING_SNAKE_CASE_ : Optional[int] = [x.item() for x in torch.linspace(0 ,config.drop_path_rate ,sum(config.depths ) )]
# patch embeddings
SCREAMING_SNAKE_CASE_ : List[Any] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] ,stride=config.strides[i] ,padding=config.padding[i] ,num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] ,hidden_size=config.hidden_sizes[i] ,) )
SCREAMING_SNAKE_CASE_ : Optional[int] = nn.ModuleList(snake_case__ )
# Transformer blocks
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
SCREAMING_SNAKE_CASE_ : str = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
snake_case__ ,num_channels=config.hidden_sizes[i] ,pool_size=config.pool_size ,hidden_size=config.hidden_sizes[i] ,intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) ,drop_path=dpr[cur + j] ,) )
blocks.append(nn.ModuleList(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : Any = nn.ModuleList(snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__=False ,snake_case__=True ):
SCREAMING_SNAKE_CASE_ : Tuple = () if output_hidden_states else None
SCREAMING_SNAKE_CASE_ : Optional[Any] = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings ,self.block ) ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = layers
# Get patch embeddings from hidden_states
SCREAMING_SNAKE_CASE_ : Optional[int] = embedding_layer(snake_case__ )
# Send the embeddings through the blocks
for _, blk in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = blk(snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = layer_outputs[0]
if output_hidden_states:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=snake_case__ ,hidden_states=snake_case__ )
class lowerCAmelCase_ ( __a ):
__a : Any = PoolFormerConfig
__a : int = '''poolformer'''
__a : Union[str, Any] = '''pixel_values'''
__a : str = True
def snake_case ( self ,snake_case__ ):
if isinstance(snake_case__ ,(nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(snake_case__ ,nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def snake_case ( self ,snake_case__ ,snake_case__=False ):
if isinstance(snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = value
UpperCamelCase__ : Optional[Any] = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UpperCamelCase__ : str = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , __a , )
class lowerCAmelCase_ ( __a ):
def __init__( self ,snake_case__ ):
super().__init__(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = config
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PoolFormerEncoder(snake_case__ )
# Initialize weights and apply final processing
self.post_init()
def snake_case ( self ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(snake_case__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=snake_case__ ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def snake_case ( self ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,):
SCREAMING_SNAKE_CASE_ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ : str = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
SCREAMING_SNAKE_CASE_ : Tuple = self.encoder(
snake_case__ ,output_hidden_states=snake_case__ ,return_dict=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : int = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=snake_case__ ,hidden_states=encoder_outputs.hidden_states ,)
class lowerCAmelCase_ ( nn.Module ):
def __init__( self ,snake_case__ ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Optional[int] = nn.Linear(config.hidden_size ,config.hidden_size )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.dense(snake_case__ )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , __a , )
class lowerCAmelCase_ ( __a ):
def __init__( self ,snake_case__ ):
super().__init__(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = config.num_labels
SCREAMING_SNAKE_CASE_ : str = PoolFormerModel(snake_case__ )
# Final norm
SCREAMING_SNAKE_CASE_ : Optional[Any] = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
SCREAMING_SNAKE_CASE_ : Any = (
nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(snake_case__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=snake_case__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def snake_case ( self ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,):
SCREAMING_SNAKE_CASE_ : str = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : Tuple = self.poolformer(
snake_case__ ,output_hidden_states=snake_case__ ,return_dict=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : str = outputs[0]
SCREAMING_SNAKE_CASE_ : Any = self.classifier(self.norm(snake_case__ ).mean([-2, -1] ) )
SCREAMING_SNAKE_CASE_ : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_ : List[Any] = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE_ : Any = 'single_label_classification'
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = 'multi_label_classification'
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_ : str = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = loss_fct(snake_case__ ,snake_case__ )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE_ : int = CrossEntropyLoss()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE_ : Optional[int] = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE_ : int = loss_fct(snake_case__ ,snake_case__ )
if not return_dict:
SCREAMING_SNAKE_CASE_ : Optional[int] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=snake_case__ ,logits=snake_case__ ,hidden_states=outputs.hidden_states )
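# --- Hedged smoke test (not in the source; uses the released `transformers`
# classes rather than the obfuscated local names, and a tiny random config):
import torch as _demo_torch
from transformers import PoolFormerConfig as _DemoConfig, PoolFormerModel as _DemoModel

_demo_model = _DemoModel(_DemoConfig(hidden_sizes=[8, 16, 24, 32], depths=[1, 1, 1, 1])).eval()
with _demo_torch.no_grad():
    _demo_output = _demo_model(_demo_torch.randn(1, 3, 64, 64))
print(_demo_output.last_hidden_state.shape)  # roughly (1, 32, 2, 2) with these sizes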
| 706 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_00 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( donor_conc : float , acceptor_conc : float , intrinsic_conc : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
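# --- Hedged worked example (illustrative concentrations, not from the source).
# Recomputing the formula above for N_D = N_A = 1e17 cm^-3 and n_i = 1e10 cm^-3:
from math import log as _demo_log
from scipy.constants import Boltzmann as _demo_boltzmann
from scipy.constants import physical_constants as _demo_constants

_demo_v_bi = (
    _demo_boltzmann * 3_00 * _demo_log((1e17 * 1e17) / 1e10**2)
    / _demo_constants['electron volt'][0]
)
print(f'{_demo_v_bi:.3f} V')  # ~0.833 V for these assumed values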
| 685 | 0 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : Any = {'''vocab_file''': '''spiece.model'''}
UpperCamelCase__ : Optional[Any] = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
UpperCamelCase__ : Optional[Any] = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
UpperCamelCase__ : int = 0
UpperCamelCase__ : Tuple = 1
UpperCamelCase__ : Tuple = 2
UpperCamelCase__ : List[Any] = 3
UpperCamelCase__ : Tuple = 4
class lowerCAmelCase_ ( __lowerCamelCase ):
__a : Tuple = VOCAB_FILES_NAMES
__a : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Optional[int] = '''left'''
def __init__( self ,snake_case__ ,snake_case__=False ,snake_case__=True ,snake_case__=False ,snake_case__="<s>" ,snake_case__="</s>" ,snake_case__="<unk>" ,snake_case__="<sep>" ,snake_case__="<pad>" ,snake_case__="<cls>" ,snake_case__="<mask>" ,snake_case__=["<eop>", "<eod>"] ,snake_case__ = None ,**snake_case__ ,):
        # Mask token behaves like a normal word, i.e. includes the space before it
SCREAMING_SNAKE_CASE_ : str = AddedToken(a_ ,lstrip=a_ ,rstrip=a_ ) if isinstance(a_ ,a_ ) else mask_token
SCREAMING_SNAKE_CASE_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a_ ,remove_space=a_ ,keep_accents=a_ ,bos_token=a_ ,eos_token=a_ ,unk_token=a_ ,sep_token=a_ ,pad_token=a_ ,cls_token=a_ ,mask_token=a_ ,additional_special_tokens=a_ ,sp_model_kwargs=self.sp_model_kwargs ,**a_ ,)
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : Dict = do_lower_case
SCREAMING_SNAKE_CASE_ : str = remove_space
SCREAMING_SNAKE_CASE_ : Tuple = keep_accents
SCREAMING_SNAKE_CASE_ : Dict = vocab_file
SCREAMING_SNAKE_CASE_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
@property
def snake_case ( self ):
return len(self.sp_model )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
SCREAMING_SNAKE_CASE_ : Any = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Optional[int] = None
return state
def __setstate__( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE_ : int = {}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case ( self ,snake_case__ ):
if self.remove_space:
SCREAMING_SNAKE_CASE_ : Optional[int] = " ".join(inputs.strip().split() )
else:
SCREAMING_SNAKE_CASE_ : str = inputs
SCREAMING_SNAKE_CASE_ : Any = outputs.replace('``' ,'\"' ).replace('\'\'' ,'\"' )
if not self.keep_accents:
SCREAMING_SNAKE_CASE_ : Dict = unicodedata.normalize('NFKD' ,a_ )
SCREAMING_SNAKE_CASE_ : int = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
if self.do_lower_case:
SCREAMING_SNAKE_CASE_ : Any = outputs.lower()
return outputs
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.preprocess_text(a_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.sp_model.encode(a_ ,out_type=a_ )
SCREAMING_SNAKE_CASE_ : List[str] = []
for piece in pieces:
if len(a_ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
SCREAMING_SNAKE_CASE_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
SCREAMING_SNAKE_CASE_ : int = cur_pieces[1:]
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(a_ )
else:
new_pieces.append(a_ )
return new_pieces
def snake_case ( self ,snake_case__ ):
return self.sp_model.PieceToId(a_ )
def snake_case ( self ,snake_case__ ):
return self.sp_model.IdToPiece(a_ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = "".join(a_ ).replace(a_ ,' ' ).strip()
return out_string
def snake_case ( self ,snake_case__ ,snake_case__ = False ,snake_case__ = None ,snake_case__ = True ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = kwargs.pop('use_source_tokenizer' ,a_ )
SCREAMING_SNAKE_CASE_ : List[str] = self.convert_ids_to_tokens(a_ ,skip_special_tokens=a_ )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
SCREAMING_SNAKE_CASE_ : Optional[int] = []
SCREAMING_SNAKE_CASE_ : List[str] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a_ ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
sub_texts.append(a_ )
else:
current_sub_text.append(a_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "".join(a_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
SCREAMING_SNAKE_CASE_ : List[Any] = self.clean_up_tokenization(a_ )
return clean_text
else:
return text
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case ( self ,snake_case__ ,snake_case__ = None ,snake_case__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ ,token_ids_a=a_ ,already_has_special_tokens=a_ )
if token_ids_a is not None:
return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
return ([0] * len(a_ )) + [1, 1]
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
if not os.path.isdir(a_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Any = os.path.join(
a_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ ,'wb' ) as fi:
SCREAMING_SNAKE_CASE_ : Dict = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
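# --- Hedged usage sketch (assumes network access for the pretrained files):
from transformers import XLNetTokenizer as _DemoXLNetTokenizer

_demo_tokenizer = _DemoXLNetTokenizer.from_pretrained('xlnet-base-cased')
_demo_ids = _demo_tokenizer('Hello world')['input_ids']
# XLNet appends <sep> and <cls> at the *end*; the padding side is "left":
print(_demo_tokenizer.convert_ids_to_tokens(_demo_ids))  # [..., '<sep>', '<cls>']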
| 707 |
class OverFlowError ( Exception ):
    pass
class UnderFlowError ( Exception ):
    pass
class lowerCAmelCase_ :
def __init__( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[],
[],
[],
]
def snake_case ( self ,snake_case__ ,snake_case__ ):
try:
if len(self.queues[priority] ) >= 100:
                raise OverFlowError('Maximum queue size is 100' )
self.queues[priority].append(snake_case__ )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def snake_case ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self ):
return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class lowerCAmelCase_ :
def __init__( self ):
SCREAMING_SNAKE_CASE_ : List[str] = []
def snake_case ( self ,snake_case__ ):
if len(self.queue ) == 100:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(snake_case__ )
def snake_case ( self ):
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = min(self.queue )
self.queue.remove(snake_case__ )
return data
def __str__( self ):
return str(self.queue )
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
print(lowerCamelCase_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowerCamelCase_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(lowerCamelCase_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowerCamelCase_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
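# --- Hedged note (not in the source): ElementPriorityQueue.dequeue scans the
# list twice (min + remove), i.e. O(n) per call; a binary heap gives O(log n):
import heapq

_demo_heap: list = []
for _demo_value in (70, 10, 1_00):
    heapq.heappush(_demo_heap, _demo_value)
assert heapq.heappop(_demo_heap) == 10  # smallest element comes out first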
| 685 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
SCREAMING_SNAKE_CASE_ : List[str] = {
'input_ids': tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] ,dtype=tf.intaa ), # "My dog is cute"
'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] ,dtype=tf.intaa ),
}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ )['last_hidden_state']
SCREAMING_SNAKE_CASE_ : List[Any] = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape ,snake_case__ )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE_ : str = tf.convert_to_tensor(
[
[
[0.0681762, 0.10894451, 0.06772504],
[-0.06423668, 0.02366615, 0.04329344],
[-0.06057295, 0.09974135, -0.00070584],
]
] ,dtype=tf.floataa ,)
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
| 708 |
def __UpperCAmelCase ( lowerCamelCase_ : int = 10_00 ) -> int:
"""simple docstring"""
return sum(e for e in range(3 , lowerCamelCase_ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 685 | 0 |
import operator as op
UpperCamelCase__ : List[str] = 'scaler.pt'
UpperCamelCase__ : int = 'pytorch_model'
UpperCamelCase__ : Optional[int] = 'random_states'
UpperCamelCase__ : int = 'optimizer'
UpperCamelCase__ : Union[str, Any] = 'scheduler'
UpperCamelCase__ : List[Any] = 'pytorch_model.bin'
UpperCamelCase__ : Tuple = 'pytorch_model.bin.index.json'
UpperCamelCase__ : Any = 'model.safetensors'
UpperCamelCase__ : Any = 'model.safetensors.index.json'
UpperCamelCase__ : List[Any] = '1.10.2'
UpperCamelCase__ : List[Any] = 'py38'
UpperCamelCase__ : str = '4.17.0'
UpperCamelCase__ : Optional[Any] = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
UpperCamelCase__ : List[Any] = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
UpperCamelCase__ : Dict = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
UpperCamelCase__ : Optional[int] = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
UpperCamelCase__ : Union[str, Any] = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
UpperCamelCase__ : int = '2.0.1'
UpperCamelCase__ : int = ['pdsh', 'standard', 'openmpi', 'mvapich']
UpperCamelCase__ : List[Any] = ['default', 'reduce-overhead', 'max-autotune']
UpperCamelCase__ : str = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCamelCase__ : Optional[Any] = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
UpperCamelCase__ : List[Any] = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
UpperCamelCase__ : List[str] = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
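# --- Hedged usage sketch (not in the source): the operator table above is the
# kind of mapping used to evaluate version constraints such as ">=4.17.0":
from packaging.version import parse as _demo_parse

_demo_ops = {'>': op.gt, '>=': op.ge, '==': op.eq}
assert _demo_ops['>='](_demo_parse('2.0.1'), _demo_parse('1.10.2'))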
| 709 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
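# --- Hedged sketch (not in the source): the dummy-object pattern above defers
# the missing-dependency error from import time to first use. A minimal take:
class _DemoFlaxOnly:
    def __init__(self, *args, **kwargs):
        raise ImportError('This class requires the `flax` backend: pip install flax')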
| 685 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class lowerCAmelCase_ :
__a : Any = 42
__a : Optional[Any] = None
# Automatically constructed
__a : Optional[Any] = "dict"
__a : Dict = None
__a : Dict = field(default="Translation" , init=_A , repr=_A )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def snake_case ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class lowerCAmelCase_ :
__a : Dict = None
__a : int = None
__a : List[str] = None
# Automatically constructed
__a : List[str] = "dict"
__a : str = None
__a : str = field(default="TranslationVariableLanguages" , init=_A , repr=_A )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = sorted(set(self.languages ) ) if self.languages else None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = set(self.languages )
if self.languages and set(UpperCamelCase__ ) - lang_set:
raise ValueError(
F'Some languages in example ({", ".join(sorted(set(UpperCamelCase__ ) - lang_set ) )}) are not in valid set ({", ".join(UpperCamelCase__ )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
SCREAMING_SNAKE_CASE_ : Any = []
for lang, text in translation_dict.items():
if isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = zip(*sorted(UpperCamelCase__ ) )
return {"language": languages, "translation": translations}
def snake_case ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
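# --- Hedged usage sketch (assumes the released `datasets` library exposes these
# feature classes under their documented names):
from datasets.features import TranslationVariableLanguages as _DemoTranslation

_demo_feature = _DemoTranslation(languages=['en', 'fr'])
print(_demo_feature.encode_example({'en': 'the cat', 'fr': ['le chat', 'la chatte']}))
# {'language': ['en', 'fr', 'fr'], 'translation': ['the cat', 'le chat', 'la chatte']}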
| 710 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCamelCase__ : Union[str, Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Any = []
UpperCamelCase__ : Optional[Any] = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
UpperCamelCase__ : Any = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'''emoji''': True,
},
}
]
UpperCamelCase__ : Union[str, Any] = 0
for log in Path().glob('''*.log'''):
UpperCamelCase__ : Optional[int] = 0
with open(log, '''r''') as f:
for line in f:
UpperCamelCase__ : Any = json.loads(line)
if line.get('''nodeid''', '''''') != "":
UpperCamelCase__ : Tuple = line['''nodeid''']
if line.get('''duration''', None) is not None:
UpperCamelCase__ : List[Any] = F"""{line["duration"]:.4f}"""
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCamelCase__ : Tuple = []
log.unlink()
UpperCamelCase__ : List[Any] = ''''''
UpperCamelCase__ : List[str] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Optional[int] = {}
for test in failed_tests:
UpperCamelCase__ : str = test[0].split('''::''')
UpperCamelCase__ : List[Any] = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
UpperCamelCase__ : int = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCamelCase__ : str = [test[0] for test in failed_table]
UpperCamelCase__ : Union[str, Any] = list(set(files))
# Count number of instances in failed_tests
UpperCamelCase__ : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCamelCase__ : str = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
UpperCamelCase__ : List[Any] = '''Too many failed tests, please see the full report in the Action results.'''
UpperCamelCase__ : Optional[Any] = len(err) + 10
UpperCamelCase__ : List[str] = message[: 30_00 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
UpperCamelCase__ : Optional[Any] = '''No failed tests! 🤗'''
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
UpperCamelCase__ : int = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCamelCase__ : Optional[Any] = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCamelCase__ : Tuple = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
UpperCamelCase__ : Any = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCamelCase__ : int = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCamelCase__ : str = row[0]
else:
UpperCamelCase__ : str = ''''''
UpperCamelCase__ : Optional[Any] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
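# --- Hedged demo (not in the source; follows the `hf_table_format` name this
# script references internally): the custom TableFormat renders pipe-delimited
# rows that read well inside Slack code blocks.
_demo_rendered = tabulate(
    [['test_a.py', 2]], headers=['Test Location', 'Num Failed'], tablefmt=hf_table_format
)
print(_demo_rendered)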
| 685 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
UpperCamelCase__ : int = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase__ : Any = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase__ : Optional[int] = {
'''bert-base-uncased''': 5_12,
'''bert-large-uncased''': 5_12,
'''bert-base-cased''': 5_12,
'''bert-large-cased''': 5_12,
'''bert-base-multilingual-uncased''': 5_12,
'''bert-base-multilingual-cased''': 5_12,
'''bert-base-chinese''': 5_12,
'''bert-base-german-cased''': 5_12,
'''bert-large-uncased-whole-word-masking''': 5_12,
'''bert-large-cased-whole-word-masking''': 5_12,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-base-cased-finetuned-mrpc''': 5_12,
'''bert-base-german-dbmdz-cased''': 5_12,
'''bert-base-german-dbmdz-uncased''': 5_12,
'''TurkuNLP/bert-base-finnish-cased-v1''': 5_12,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 5_12,
'''wietsedv/bert-base-dutch-cased''': 5_12,
}
UpperCamelCase__ : List[Any] = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):  # `PreTrainedTokenizerFast` is expected to be imported near the top of this file
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # `normalizers` comes from the `tokenizers` package, imported at the top of the file
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
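    # Minimal usage sketch (assumes Hub access; the model id and token ids are examples only):
    #   tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
    #   type_ids = tokenizer.create_token_type_ids_from_sequences([7592], [2088])  # [0, 0, 0, 1, 1]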
| 711 |
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the 1-indexed position of the highest set bit of `number` (0 for 0).

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
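    # Quick illustrative check: 13 == 0b1101, so its highest set bit is at position 4.
    print(get_highest_set_bit_position(13))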
| 685 | 0 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    SpeechaTextaConfig,
    SpeechaTextaForCausalLM,
    SpeechaTextaTokenizer,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaModel,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
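# Worked example of how MAPPING is applied below (the key is illustrative, not taken from a
# real checkpoint): a fairseq tensor named "w2v_model.encoder.layers.3.fc1.weight" matches
# the "fc1" entry, and the "*" in the mapped key is replaced by the layer index extracted
# from the name, so the value lands on
# "encoder.layers.3.feed_forward.intermediate_dense" with weight_type "weight".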
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy a single fairseq tensor onto the attribute of the HF model addressed by `key`."""
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        F' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Walk the fairseq state dict and copy every tensor onto the matching HF module."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group')
            is_used = True
        elif name.split('.')[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'Unused weights: {unused_weights}')
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv feature-extractor tensor from fairseq into the HF feature extractor."""
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build a bias-free linear layer whose weight is tied to an embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """Turn a fairseq dict file into a token-to-id vocabulary with the 4 special tokens first."""
    with open(dict_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        words = [line.split(' ')[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
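# Illustrative example: a fairseq dict file containing the two lines "| 49" and "e 18"
# yields {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, '|': 4, 'e': 5} (counts are ignored).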
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers):
    """Copy/paste/tweak a fairseq wav2vec2 seq2seq checkpoint into a transformers SpeechEncoderDecoderModel."""
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove('embed_out')
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}')
    logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}')
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, 'vocab.json'), 'w') as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, 'vocab.json'))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_02_24, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
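    # Example invocation (script name and all paths are placeholders):
    #   python convert_wav2vec2_seq2seq_checkpoint.py \
    #       --checkpoint_path /path/to/wav2vec2_seq2seq.pt \
    #       --dict_path /path/to/dict.ltr.txt \
    #       --pytorch_dump_folder_path ./converted-model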
| 712 |
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Build an n-qubit GHZ (fully entangled) state and return simulated measurement counts."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate so qubit i becomes entangled with qubit i - 1
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=10_00)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
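    # For a GHZ state only the all-zeros and all-ones bitstrings should ever be measured,
    # so a quick illustrative sanity check is:
    assert set(quantum_entanglement(2)) <= {"00", "11"}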
| 685 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 713 |
def check_bouncy(n: int) -> bool:
    """
    Return True if `n` is bouncy, i.e. its digits are neither entirely
    non-decreasing nor entirely non-increasing.

    >>> check_bouncy(6789)
    False
    >>> check_bouncy(155349)
    True
    """
    if not isinstance(n, int):
        raise ValueError('check_bouncy() accepts only integer arguments')
    str_n = str(n)
    sorted_str_n = ''.join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution(percent: float = 99) -> int:
    """
    Return the least number for which the proportion of bouncy numbers
    reaches exactly `percent` percent.

    >>> solution(50)
    538
    """
    if not 0 < percent < 1_00:
        raise ValueError('solution() only accepts values from 0 to 100')
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 1_00 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
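    # The Project Euler 112 statement itself provides checkpoints usable as sanity checks:
    # the proportion of bouncy numbers first reaches 50% at 538 and 90% at 21780.
    assert solution(50) == 538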
| 685 | 0 |
import os
import sys
import unittest
UpperCamelCase__ : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
BLIP_TEST_FILE = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {'BertModelTest': 'BertModelTester'}
        EXPECTED_BLIP_MAPPING = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
        EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
        EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 714 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_chinese_clip'''] = ['''ChineseCLIPFeatureExtractor''']
    _import_structure['''image_processing_chinese_clip'''] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_chinese_clip'''] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 685 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)
    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold)
    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1E-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        samplea = self.dummy_sample_deter
        sampleb = self.dummy_sample_deter + 0.1
        samplec = self.dummy_sample_deter - 0.1
        per_sample_batch = samplea.shape[0]
        samples = torch.stack([samplea, sampleb, samplec], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1E-2
        assert abs(result_mean.item() - 0.4982) < 1E-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1E-2
        assert abs(result_mean.item() - 0.223967) < 1E-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1E-2
        assert abs(result_mean.item() - 0.0684) < 1E-3
    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1E-2
        assert abs(result_mean.item() - 0.1951) < 1E-3
    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1E-2
        assert abs(result_mean.item() - 0.1941) < 1E-3
| 715 |
def actual_power(a: int, b: int) -> int:
    """Compute a ** b for a non-negative integer exponent by divide and conquer."""
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half
def power(a: int, b: int) -> float:
    """Compute a ** b, supporting negative exponents via the reciprocal."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)
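# Deterministic spot checks (these follow directly from the definition of power):
# power(2, 3) == 8, power(2, -3) == 0.125, power(-2, -3) == -0.125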
if __name__ == "__main__":
print(power(-2, -3))
| 685 | 0 |
import datasets
from .evaluate import evaluate
UpperCamelCase__ : Optional[int] = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
UpperCamelCase__ : Union[str, Any] = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
UpperCamelCase__ : str = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) ,codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] ,reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] ,)
    def _compute(self, predictions, references):
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 716 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True)['hidden_states'][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True)['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1E-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
    def test_save_load_fast_init_from_base(self):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1E-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf', device_map='auto')
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf', device_map='auto')
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf', device_map='auto')
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-2, rtol=1E-2)
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test')
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf', device_map='auto')
        out = model(torch.tensor(input_ids))
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32)
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
    @unittest.skip('Model is currently gated')
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = 'Simply put, the theory of relativity states that '
        tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf')
        input_ids = tokenizer.encode(prompt, return_tensors='pt')
        model = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf', device_map='sequential', use_safetensors=False)
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 685 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''Hello, World!'''
SAMPLE_LANGUAGE = '''en_XX'''
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool) -> None:
    """Copy the weights of a fairseq X-MOD checkpoint into a transformers Xmod model."""
    data_dir = Path('data_bin')
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent), checkpoint_file=Path(xmod_checkpoint_path).name, _name='xmod_base', arch='xmod_base', task='multilingual_masked_lm', data_name_or_path=str(data_dir), bpe='sentencepiece', sentencepiece_model=str(Path(xmod_checkpoint_path).parent / 'sentencepiece.bpe.model'), src_dict=str(data_dir / 'dict.txt'))
    xmod.eval()  # disable dropout
    print(xmod)
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=5_14, type_vocab_size=1, layer_norm_eps=1E-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, 'bottleneck', 2), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages)
    if classification_head:
        config.num_labels = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our X-MOD config:', config)
    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('Dimensions of self-attention output weights do not match.')
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError('Dimensions of intermediate weights do not match.')
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError('Dimensions of feed-forward weights do not match.')
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
            raise AssertionError('Lists of language adapters do not match.')
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads['mnli'].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads['mnli'].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads['mnli'].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads['mnli'].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE_ : Dict = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = model(snake_case__ )[0]
if classification_head:
SCREAMING_SNAKE_CASE_ : Any = xmod.model.classification_heads['mnli'](xmod.extract_features(snake_case__ ) )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
UpperCamelCase__ : List[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
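# Usage sketch for the conversion entry point above (assumes this file is saved as
# convert_xmod.py; all paths are hypothetical placeholders):
#
#   python convert_xmod.py \
#       --xmod_checkpoint_path /path/to/fairseq/xmod/checkpoint \
#       --pytorch_dump_folder_path ./converted-xmod \
#       --classification_head
#
# The three flags are exactly the ones registered with argparse above; omit
# --classification_head to convert the LM-head variant instead.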
| 717 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
UpperCamelCase__ : int = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
UpperCamelCase__ : str = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Tuple ) -> List[str]:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Dict = collections.OrderedDict()
SCREAMING_SNAKE_CASE_ : Dict = collections.OrderedDict()
SCREAMING_SNAKE_CASE_ : List[Any] = collections.OrderedDict()
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Any = f.readlines()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = b
SCREAMING_SNAKE_CASE_ : Dict = idx
for wd in b:
SCREAMING_SNAKE_CASE_ : Any = idx
return vocab, raw_vocab, ids_to_tokens, emoji
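# Layout sketch of the vocab.txt consumed above (tokens are hypothetical, not from the
# real file): each line holds one or more comma-separated surface forms that share a
# single id, which is why the comprehension maps every variant back to the same index.
#
#   こんにちは
#   こんにちわ,コンニチハ      <- two fluctuating spellings collapsed onto one id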
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : List[str] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__="<|endoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__="<|startoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__=False ,**snake_case__ ,):
super().__init__(
unk_token=snake_case__ ,pad_token=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,do_clean_text=snake_case__ ,**snake_case__ ,)
if not os.path.isfile(snake_case__ ):
raise ValueError(
                F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(snake_case__ ):
            raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
SCREAMING_SNAKE_CASE_ : str = do_clean_text
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = load_vocab_and_emoji(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = SubWordJapaneseTokenizer(
vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
@property
def snake_case ( self ):
        # self.vocab includes extra variants for character fluctuation unique to Japanese, so the reported size is based on raw_vocab
return len(self.raw_vocab )
def snake_case ( self ):
return dict(self.raw_vocab ,**self.added_tokens_encoder )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.tokenize(snake_case__ ,clean=self.do_clean_text )
def snake_case ( self ,snake_case__ ):
return self.vocab.get(snake_case__ ,self.vocab.get(self.unk_token ) )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.convert_id_to_token(snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = ''.join(snake_case__ ).strip()
return out_string
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case__ ,add_special_tokens=snake_case__ ) + [self.eos_token_id] )
if len(snake_case__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE_ : List[Any] = input_ids[-self.model_max_length :]
return input_ids
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
if os.path.isdir(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
SCREAMING_SNAKE_CASE_ : Tuple = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
SCREAMING_SNAKE_CASE_ : str = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
SCREAMING_SNAKE_CASE_ : Dict = token_index
writer.write(','.join(snake_case__ ) + '\n' )
index += 1
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
json.dump(self.emoji ,snake_case__ )
return vocab_file, emoji_file
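# Minimal usage sketch for the tokenizer class above (GPTNeoXJapaneseTokenizer upstream;
# the exact ids produced depend on the downloaded vocab and are not shown here):
#
#   from transformers import GPTNeoXJapaneseTokenizer
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained('abeja/gpt-neox-japanese-2.7b')
#   ids = tokenizer('こんにちは、世界')['input_ids']
#   text = tokenizer.decode(ids)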
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = vocab # same as swe
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_to_tokens # same as bpe
SCREAMING_SNAKE_CASE_ : Dict = emoji
SCREAMING_SNAKE_CASE_ : int = np.max([len(snake_case__ ) for w in self.vocab.keys()] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
SCREAMING_SNAKE_CASE_ : str = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
SCREAMING_SNAKE_CASE_ : str = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
SCREAMING_SNAKE_CASE_ : str = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
SCREAMING_SNAKE_CASE_ : int = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
SCREAMING_SNAKE_CASE_ : Tuple = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ):
return len(self.ids_to_tokens )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = self.content_repattera.sub('<URL>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = self.content_repattera.sub('<EMAIL>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.content_repattera.sub('<TEL>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.content_repattera.sub('<DATE>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.content_repattera.sub('<DATE>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.content_repattera.sub('<PRICE>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : str = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = content.replace('<BLOCK><BLOCK>' ,'<BLOCK>' )
return content
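    # Illustrative effect of the method above (hypothetical input; follows the regex
    # substitutions in __init__): 'ご連絡は foo@example.com か https://example.com まで'
    # becomes 'ご連絡は <EMAIL> か <URL> まで'.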
def snake_case ( self ,snake_case__ ,snake_case__=False ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace(' ' ,'<SP>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace(' ' ,'<SP>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace('\r\n' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace('\n' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace('\r' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : List[str] = text.replace('\t' ,'<TAB>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace('—' ,'ー' )
SCREAMING_SNAKE_CASE_ : Optional[int] = text.replace('−' ,'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
SCREAMING_SNAKE_CASE_ : int = text.replace(snake_case__ ,snake_case__ )
if clean:
SCREAMING_SNAKE_CASE_ : str = self.clean_text(snake_case__ )
def check_simbol(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = x.encode()
if len(snake_case__ ) == 1 and len(snake_case__ ) == 2:
SCREAMING_SNAKE_CASE_ : str = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2A1 and c <= 0XC2BF)
or (c >= 0XC780 and c <= 0XC783)
or (c >= 0XCAB9 and c <= 0XCBBF)
or (c >= 0XCC80 and c <= 0XCDA2)
):
return True
return False
def checkuae(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = x.encode()
if len(snake_case__ ) == 1 and len(snake_case__ ) == 3:
SCREAMING_SNAKE_CASE_ : Dict = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE2_8080 and c <= 0XE2_B07F:
return True
return False
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : List[Any] = []
while pos < len(snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = min(len(snake_case__ ) ,pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
SCREAMING_SNAKE_CASE_ : List[Any] = [] # (token_id, token, pos)
for e in range(snake_case__ ,snake_case__ ,-1 ):
SCREAMING_SNAKE_CASE_ : str = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(snake_case__ ) > 2:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(snake_case__ ) > 0:
# the smallest token_id is adopted
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = sorted(snake_case__ ,key=lambda snake_case__ : x[0] )[0]
result.append(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = e
else:
SCREAMING_SNAKE_CASE_ : Any = pos + 1
SCREAMING_SNAKE_CASE_ : Optional[int] = text[pos:end]
if check_simbol(snake_case__ ):
result.append('<KIGOU>' )
elif checkuae(snake_case__ ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
SCREAMING_SNAKE_CASE_ : int = end
return result
def snake_case ( self ,snake_case__ ,snake_case__="\n" ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : Dict = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(snake_case__ ) > 0:
words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : Dict = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(snake_case__ )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(snake_case__ )
if len(snake_case__ ) > 0:
words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : int = ''.join(snake_case__ )
return text
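    # Decode sketch for the byte-fallback branch above (illustrative, not a test):
    # an out-of-vocab character is encoded as one '<|byte%d|>' token per UTF-8 byte,
    # e.g. '猫' (UTF-8 bytes 0xE7 0x8C 0xAB) round-trips through
    # ['<|byte231|>', '<|byte140|>', '<|byte171|>'] and is reassembled by
    # bytearray(byte_tokens).decode('utf-8', errors='replace').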
| 685 | 0 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=99 ,snake_case__=13 ,snake_case__=7 ,snake_case__=9 ,snake_case__=True ,snake_case__=True ,snake_case__=False ,snake_case__=32 ,snake_case__=5 ,snake_case__=4 ,snake_case__=37 ,snake_case__=8 ,snake_case__=0.1 ,snake_case__=0.002 ,snake_case__=1 ,snake_case__=0 ,snake_case__=0 ,snake_case__=None ,snake_case__=None ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE_ : Any = encoder_seq_length
SCREAMING_SNAKE_CASE_ : str = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE_ : Optional[int] = self.decoder_seq_length
SCREAMING_SNAKE_CASE_ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE_ : List[Any] = use_attention_mask
SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = d_ff
SCREAMING_SNAKE_CASE_ : Any = relative_attention_num_buckets
SCREAMING_SNAKE_CASE_ : Optional[Any] = dropout_rate
SCREAMING_SNAKE_CASE_ : int = initializer_factor
SCREAMING_SNAKE_CASE_ : Optional[Any] = eos_token_id
SCREAMING_SNAKE_CASE_ : Dict = pad_token_id
SCREAMING_SNAKE_CASE_ : Optional[Any] = decoder_start_token_id
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
SCREAMING_SNAKE_CASE_ : List[str] = decoder_layers
def snake_case ( self ):
return TaConfig.from_pretrained('google/umt5-base' )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,):
if attention_mask is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE_ : Any = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
SCREAMING_SNAKE_CASE_ : List[Any] = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=UpperCAmelCase__ )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE_ : Tuple = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=UpperCAmelCase__ )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.ones(
config.num_decoder_layers ,config.num_attention_heads ,device=UpperCAmelCase__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
SCREAMING_SNAKE_CASE_ : List[str] = input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE_ : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE_ : str = self.get_config()
SCREAMING_SNAKE_CASE_ : Tuple = config.num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = self.prepare_inputs_dict(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ )
return config, input_dict
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case ( self ):
return TaConfig(
vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def snake_case ( self ):
return TaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : str = UMTaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : str = model(
input_ids=UpperCAmelCase__ ,decoder_input_ids=UpperCAmelCase__ ,attention_mask=UpperCAmelCase__ ,decoder_attention_mask=UpperCAmelCase__ ,)
SCREAMING_SNAKE_CASE_ : int = model(input_ids=UpperCAmelCase__ ,decoder_input_ids=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = result.last_hidden_state
SCREAMING_SNAKE_CASE_ : Dict = result.past_key_values
SCREAMING_SNAKE_CASE_ : Dict = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCAmelCase__ ) ,config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) ,4 )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = UMTaModel(config=UpperCAmelCase__ ).get_decoder().to(UpperCAmelCase__ ).eval()
# first forward pass
SCREAMING_SNAKE_CASE_ : List[Any] = model(UpperCAmelCase__ ,use_cache=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = model(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = model(UpperCAmelCase__ ,use_cache=UpperCAmelCase__ )
self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) )
self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) + 1 )
SCREAMING_SNAKE_CASE_ : List[str] = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((self.batch_size, 1) ,config.vocab_size )
        # append the new token to input_ids
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Any = model(UpperCAmelCase__ )['''last_hidden_state''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(UpperCAmelCase__ ,past_key_values=UpperCAmelCase__ )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ : Tuple = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ ,UpperCAmelCase__ ,atol=1E-3 ) )
def snake_case ( self ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = UMTaModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).half().eval()
SCREAMING_SNAKE_CASE_ : str = model(**UpperCAmelCase__ )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(UpperCAmelCase__ ).any().item() )
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : str = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
__a : str = (UMTaForConditionalGeneration,) if is_torch_available() else ()
__a : Any = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
__a : Dict = True
__a : List[str] = False
__a : Optional[int] = False
__a : Optional[int] = True
__a : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
__a : int = [0.8, 0.9]
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCAmelCase__ ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F'{tmpdirname}/t5_test.onnx' ,export_params=UpperCAmelCase__ ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,)
@unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : int = config_and_inputs[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = UMTaForConditionalGeneration(UpperCAmelCase__ ).eval()
model.to(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = {
'''head_mask''': torch.zeros(config.num_layers ,config.num_heads ,device=UpperCAmelCase__ ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=UpperCAmelCase__ ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=UpperCAmelCase__ ),
}
for attn_name, (name, mask) in zip(UpperCAmelCase__ ,head_masking.items() ):
SCREAMING_SNAKE_CASE_ : int = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
SCREAMING_SNAKE_CASE_ : List[str] = torch.ones(
config.num_decoder_layers ,config.num_heads ,device=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.generate(
config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=UpperCAmelCase__ ,return_dict_in_generate=UpperCAmelCase__ ,**UpperCAmelCase__ ,)
# We check the state of decoder_attentions and cross_attentions just from the last step
SCREAMING_SNAKE_CASE_ : List[str] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def snake_case ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=UpperCAmelCase__ ).to(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=UpperCAmelCase__ ,legacy=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
SCREAMING_SNAKE_CASE_ : Dict = tokenizer(UpperCAmelCase__ ,return_tensors='pt' ,padding=UpperCAmelCase__ ).input_ids
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = model.generate(input_ids.to(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE_ : int = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ ,UpperCAmelCase__ )
| 718 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : int=() , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Optional[int]="no" , lowerCamelCase_ : Optional[Any]="29500" ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
SCREAMING_SNAKE_CASE_ : str = True
elif "IPython" in sys.modules:
SCREAMING_SNAKE_CASE_ : Dict = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
SCREAMING_SNAKE_CASE_ : Optional[int] = PrecisionType(mixed_precision.lower() )
except ValueError:
        raise ValueError(
            F'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , lowerCamelCase_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 8
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='TPU' )
print(F'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*lowerCamelCase_ )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
            # torch.distributed will expect a few environment variables to be set. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=lowerCamelCase_ , master_addr='127.0.0.1' , master_port=lowerCamelCase_ , mixed_precision=lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='MULTI_GPU' )
print(F'Launching training on {num_processes} GPUs.' )
try:
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
SCREAMING_SNAKE_CASE_ : Optional[Any] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*lowerCamelCase_ )
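# Usage sketch for the launcher above (notebook_launcher upstream; the training
# function and its args are hypothetical):
#
#   def training_function(learning_rate, batch_size):
#       ...  # build the Accelerator *inside* this function, per the checks above
#
#   notebook_launcher(training_function, args=(3e-4, 32), num_processes=2, mixed_precision='fp16')
#
# num_processes must be given explicitly for multi-GPU, and the mixed_precision
# string has to be a valid PrecisionType value.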
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple=() , lowerCamelCase_ : str=2 ) -> Union[str, Any]:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be set. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=lowerCamelCase_ , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
SCREAMING_SNAKE_CASE_ : Tuple = PrepareForLaunch(lowerCamelCase_ , debug=lowerCamelCase_ )
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
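# Usage sketch (debug_launcher upstream; test_fn is a hypothetical callable):
#   debug_launcher(test_fn, args=(), num_processes=2)
# It pins execution to CPU via accelerate_use_cpu='yes' and forks the workers.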
| 685 | 0 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
UpperCamelCase__ : Any = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCamelCase__ : Any = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCamelCase__ : int = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ ) -> tuple[str, float]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = len([g for position, g in enumerate(lowerCamelCase_ ) if g == main_target[position]] )
return (item, float(lowerCamelCase_ ))
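# Worked example for the scorer above (hypothetical strings): with item='banana' and
# target='bananb', positions 0-4 match and position 5 does not, so the result is
# ('banana', 5.0).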
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ ) -> tuple[str, str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = random.randint(0 , len(lowerCamelCase_ ) - 1 )
SCREAMING_SNAKE_CASE_ : Tuple = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE_ : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = list(lowerCamelCase_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE_ : List[Any] = random.choice(lowerCamelCase_ )
return "".join(lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> list[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE_ : Optional[Any] = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = 10 if child_n >= 10 else child_n
for _ in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = population_score[random.randint(0 , lowerCamelCase_ )][0]
SCREAMING_SNAKE_CASE_ : Tuple = crossover(parent_a[0] , lowerCamelCase_ )
# Append new string to the population list.
pop.append(mutate(lowerCamelCase_ , lowerCamelCase_ ) )
pop.append(mutate(lowerCamelCase_ , lowerCamelCase_ ) )
return pop
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE_ : Dict = F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(lowerCamelCase_ )
# Verify that the target contains no genes besides the ones inside genes variable.
SCREAMING_SNAKE_CASE_ : Any = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE_ : int = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(lowerCamelCase_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE_ : Dict = []
for _ in range(lowerCamelCase_ ):
population.append(''.join([random.choice(lowerCamelCase_ ) for i in range(len(lowerCamelCase_ ) )] ) )
# Just some logs to know what the algorithms is doing.
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowerCamelCase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE_ : str = [evaluate(lowerCamelCase_ , lowerCamelCase_ ) for item in population]
# Check if there is a matching evolution.
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = sorted(lowerCamelCase_ , key=lambda x : x[1] , reverse=lowerCamelCase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
SCREAMING_SNAKE_CASE_ : Dict = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowerCamelCase_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE_ : str = [
(item, score / len(lowerCamelCase_ )) for item, score in population_score
]
# This is selection
for i in range(lowerCamelCase_ ):
population.extend(select(population_score[int(lowerCamelCase_ )] , lowerCamelCase_ , lowerCamelCase_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(lowerCamelCase_ ) > N_POPULATION:
break
if __name__ == "__main__":
UpperCamelCase__ : str = (
"""This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
)
UpperCamelCase__ : Any = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
UpperCamelCase__ : int = basic(target_str, genes_list)
print(
F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 719 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase__ : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 685 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowerCAmelCase_ ( __lowerCamelCase ):
__a : Tuple = 42
__a : Union[str, Any] = 42
class lowerCAmelCase_ ( nn.Module ):
__a : Any = 42
__a : Dict = (16, 32, 96, 2_56)
__a : Dict = jnp.floataa
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
SCREAMING_SNAKE_CASE_ : Any = []
for i in range(len(self.block_out_channels ) - 1 ):
SCREAMING_SNAKE_CASE_ : Dict = self.block_out_channels[i]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.block_out_channels[i + 1]
SCREAMING_SNAKE_CASE_ : List[Any] = nn.Conv(
SCREAMING_SNAKE_CASE_ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.Conv(
SCREAMING_SNAKE_CASE_ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = blocks
SCREAMING_SNAKE_CASE_ : int = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = self.conv_in(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Any = nn.silu(SCREAMING_SNAKE_CASE_ )
for block in self.blocks:
SCREAMING_SNAKE_CASE_ : Any = block(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : int = nn.silu(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : int = self.conv_out(SCREAMING_SNAKE_CASE_ )
return embedding
@flax_register_to_config
class lowerCAmelCase_ ( nn.Module , __lowerCamelCase , __lowerCamelCase ):
__a : List[str] = 32
__a : int = 4
__a : Union[str, Any] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__a : int = False
__a : Optional[int] = (3_20, 6_40, 12_80, 12_80)
__a : List[str] = 2
__a : Tuple = 8
__a : Optional[Any] = None
__a : int = 12_80
__a : Union[str, Any] = 0.0
__a : Tuple = False
__a : Optional[Any] = jnp.floataa
__a : str = True
__a : Tuple = 0
__a : int = "rgb"
__a : Union[str, Any] = (16, 32, 96, 2_56)
def snake_case ( self ,snake_case__ ):
# init input tensors
SCREAMING_SNAKE_CASE_ : Dict = (1, self.in_channels, self.sample_size, self.sample_size)
SCREAMING_SNAKE_CASE_ : int = jnp.zeros(SCREAMING_SNAKE_CASE_ ,dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ : List[str] = jnp.ones((1,) ,dtype=jnp.intaa )
SCREAMING_SNAKE_CASE_ : int = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ : Optional[Any] = (1, 3, self.sample_size * 8, self.sample_size * 8)
SCREAMING_SNAKE_CASE_ : Dict = jnp.zeros(SCREAMING_SNAKE_CASE_ ,dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = jax.random.split(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'params': params_rng, 'dropout': dropout_rng}
return self.init(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )["params"]
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.block_out_channels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE_ : Any = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
SCREAMING_SNAKE_CASE_ : int = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxTimestepEmbedding(SCREAMING_SNAKE_CASE_ ,dtype=self.dtype )
SCREAMING_SNAKE_CASE_ : str = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.only_cross_attention
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ : int = (num_attention_heads,) * len(self.down_block_types )
# down
SCREAMING_SNAKE_CASE_ : Tuple = []
SCREAMING_SNAKE_CASE_ : Any = []
SCREAMING_SNAKE_CASE_ : Dict = block_out_channels[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = nn.Conv(
SCREAMING_SNAKE_CASE_ ,kernel_size=(1, 1) ,padding='VALID' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(SCREAMING_SNAKE_CASE_ )
for i, down_block_type in enumerate(self.down_block_types ):
SCREAMING_SNAKE_CASE_ : Dict = output_channel
SCREAMING_SNAKE_CASE_ : Any = block_out_channels[i]
SCREAMING_SNAKE_CASE_ : Optional[Any] = i == len(SCREAMING_SNAKE_CASE_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE_ : Any = FlaxCrossAttnDownBlockaD(
in_channels=SCREAMING_SNAKE_CASE_ ,out_channels=SCREAMING_SNAKE_CASE_ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
SCREAMING_SNAKE_CASE_ : int = FlaxDownBlockaD(
in_channels=SCREAMING_SNAKE_CASE_ ,out_channels=SCREAMING_SNAKE_CASE_ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(SCREAMING_SNAKE_CASE_ )
for _ in range(self.layers_per_block ):
SCREAMING_SNAKE_CASE_ : int = nn.Conv(
SCREAMING_SNAKE_CASE_ ,kernel_size=(1, 1) ,padding='VALID' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(SCREAMING_SNAKE_CASE_ )
if not is_final_block:
SCREAMING_SNAKE_CASE_ : List[Any] = nn.Conv(
SCREAMING_SNAKE_CASE_ ,kernel_size=(1, 1) ,padding='VALID' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = down_blocks
SCREAMING_SNAKE_CASE_ : str = controlnet_down_blocks
# mid
SCREAMING_SNAKE_CASE_ : int = block_out_channels[-1]
SCREAMING_SNAKE_CASE_ : Any = FlaxUNetMidBlockaDCrossAttn(
in_channels=SCREAMING_SNAKE_CASE_ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = nn.Conv(
SCREAMING_SNAKE_CASE_ ,kernel_size=(1, 1) ,padding='VALID' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 1.0 ,snake_case__ = True ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Any = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
SCREAMING_SNAKE_CASE_ : Dict = jnp.flip(SCREAMING_SNAKE_CASE_ ,axis=1 )
# 1. time
if not isinstance(SCREAMING_SNAKE_CASE_ ,jnp.ndarray ):
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(SCREAMING_SNAKE_CASE_ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE_ : Dict = timesteps.astype(dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.expand_dims(SCREAMING_SNAKE_CASE_ ,0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.time_proj(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.time_embedding(SCREAMING_SNAKE_CASE_ )
# 2. pre-process
SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.transpose(SCREAMING_SNAKE_CASE_ ,(0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ : int = self.conv_in(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Dict = jnp.transpose(SCREAMING_SNAKE_CASE_ ,(0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.controlnet_cond_embedding(SCREAMING_SNAKE_CASE_ )
sample += controlnet_cond
# 3. down
SCREAMING_SNAKE_CASE_ : int = (sample,)
for down_block in self.down_blocks:
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = down_block(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,deterministic=not train )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = down_block(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
SCREAMING_SNAKE_CASE_ : str = self.mid_block(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,deterministic=not train )
        # 5. controlnet blocks
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ()
for down_block_res_sample, controlnet_block in zip(SCREAMING_SNAKE_CASE_ ,self.controlnet_down_blocks ):
SCREAMING_SNAKE_CASE_ : Tuple = controlnet_block(SCREAMING_SNAKE_CASE_ )
controlnet_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = controlnet_down_block_res_samples
SCREAMING_SNAKE_CASE_ : List[str] = self.controlnet_mid_block(SCREAMING_SNAKE_CASE_ )
# 6. scaling
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=SCREAMING_SNAKE_CASE_ ,mid_block_res_sample=SCREAMING_SNAKE_CASE_ )
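# Shape sketch for init_weights above, using the class defaults (sample_size=32,
# in_channels=4, cross_attention_dim=1280): the dummy latent sample is NCHW
# (1, 4, 32, 32), the text embedding is (1, 1, 1280), and the conditioning image is
# (1, 3, 256, 256), i.e. sample_size * 8 on each spatial side.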
| 720 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Union[str, Any] = CLIPTokenizer
__a : List[str] = CLIPTokenizerFast
__a : List[str] = True
__a : Tuple = {}
__a : Tuple = False
def snake_case ( self ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE_ : Any = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE_ : Dict = 'xa\u0303y' + ' ' + 'x\xe3y'
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
                    '\u200E', # (left-to-right mark)
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Tuple = F'{text_of_1_token} {text_of_1_token}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : str = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F' {text}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : int = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(snake_case__ ) + 1, 1 + len(snake_case__ ) + 1 + len(snake_case__ )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
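# Illustrative sketch (not part of the original test file): a standalone check of
# slow/fast CLIP tokenizer equivalence on Unicode-normalization-sensitive input,
# the property the @require_ftfy tests above assert. Assumes network access to
# the `openai/clip-vit-base-patch32` checkpoint and the `ftfy` package.
from transformers import CLIPTokenizer, CLIPTokenizerFast

def _demo_slow_fast_equivalence():
    slow = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    # The same accented character encoded two ways: combining tilde vs. precomposed.
    text = "xa\u0303y" + " " + "x\xe3y"
    assert slow.tokenize(text) == fast.tokenize(text)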
| 685 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCAmelCase_ ( _snake_case , unittest.TestCase ):
__a : Any = TextToVideoSDPipeline
__a : str = TEXT_TO_IMAGE_PARAMS
__a : int = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__a : Optional[int] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def snake_case ( self ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : str = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') ,up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') ,cross_attention_dim=32 ,attention_head_dim=4 ,)
SCREAMING_SNAKE_CASE_ : List[Any] = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule='scaled_linear' ,clip_sample=snake_case_ ,set_alpha_to_one=snake_case_ ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='gelu' ,projection_dim=512 ,)
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTextModel(snake_case_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE_ : Any = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def snake_case ( self ,snake_case__ ,snake_case__=0 ):
if str(snake_case_ ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ : str = torch.manual_seed(snake_case_ )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
SCREAMING_SNAKE_CASE_ : str = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Tuple = TextToVideoSDPipeline(**snake_case_ )
SCREAMING_SNAKE_CASE_ : List[str] = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
SCREAMING_SNAKE_CASE_ : int = self.get_dummy_inputs(snake_case_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "np"
SCREAMING_SNAKE_CASE_ : Dict = sd_pipe(**snake_case_ ).frames
SCREAMING_SNAKE_CASE_ : Any = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
SCREAMING_SNAKE_CASE_ : Dict = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case_ ,expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def snake_case ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case_ ,expected_max_diff=1E-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case ( self ):
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case ( self ):
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def snake_case ( self ):
pass
def snake_case ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
SCREAMING_SNAKE_CASE_ : int = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
SCREAMING_SNAKE_CASE_ : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Tuple = pipe.to('cuda' )
SCREAMING_SNAKE_CASE_ : List[Any] = "Spiderman is surfing"
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : int = pipe(snake_case_ ,generator=snake_case_ ,num_inference_steps=25 ,output_type='pt' ).frames
SCREAMING_SNAKE_CASE_ : int = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
SCREAMING_SNAKE_CASE_ : str = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
SCREAMING_SNAKE_CASE_ : int = pipe.to('cuda' )
SCREAMING_SNAKE_CASE_ : Any = "Spiderman is surfing"
SCREAMING_SNAKE_CASE_ : str = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Any = pipe(snake_case_ ,generator=snake_case_ ,num_inference_steps=2 ,output_type='pt' ).frames
SCREAMING_SNAKE_CASE_ : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
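# Illustrative usage sketch (not part of the test file): running the pipeline
# exercised by the slow tests above. Assumes a CUDA device and the
# `damo-vilab/text-to-video-ms-1.7b` checkpoint.
import torch
from diffusers import TextToVideoSDPipeline

def _demo_text_to_video():
    pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
    pipe = pipe.to("cuda")
    generator = torch.Generator(device="cpu").manual_seed(0)
    output = pipe("Spiderman is surfing", generator=generator, num_inference_steps=25)
    return output.frames  # the generated video frames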
| 721 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
SCREAMING_SNAKE_CASE_ : int = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE_ : Optional[Any] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
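# Illustrative sketch (not part of this module): how a subcommand plugs into the
# dispatch above. Each command registers a subparser and sets `func` to a
# factory; `main()` then calls `args.func(args)` and `.run()` on the result.
# `GreetCommand` is a hypothetical example, not a real transformers-cli command.
from argparse import ArgumentParser
from transformers.commands import BaseTransformersCLICommand

class GreetCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # `parser` is the subparsers action created by `add_subparsers()`.
        greet_parser = parser.add_parser("greet", help="Print a greeting")
        greet_parser.add_argument("--name", type=str, default="world")
        greet_parser.set_defaults(func=lambda args: GreetCommand(args.name))

    def __init__(self, name: str):
        self._name = name

    def run(self):
        print(f"Hello, {self._name}!")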
| 685 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : Optional[Any] = {'''vocab_file''': '''spiece.model'''}
UpperCamelCase__ : Tuple = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
UpperCamelCase__ : Any = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : Dict = 1
UpperCamelCase__ : Union[str, Any] = 2
UpperCamelCase__ : Optional[int] = 3
UpperCamelCase__ : Dict = 4
class lowerCAmelCase_ ( lowercase__ ):
__a : str = VOCAB_FILES_NAMES
__a : Tuple = PRETRAINED_VOCAB_FILES_MAP
__a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : str = "left"
def __init__( self ,snake_case__ ,snake_case__=False ,snake_case__=True ,snake_case__=False ,snake_case__="<s>" ,snake_case__="</s>" ,snake_case__="<unk>" ,snake_case__="<sep>" ,snake_case__="<pad>" ,snake_case__="<cls>" ,snake_case__="<mask>" ,snake_case__=["<eop>", "<eod>"] ,snake_case__ = None ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(UpperCAmelCase__ ,lstrip=UpperCAmelCase__ ,rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ) else mask_token
SCREAMING_SNAKE_CASE_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase__ ,remove_space=UpperCAmelCase__ ,keep_accents=UpperCAmelCase__ ,bos_token=UpperCAmelCase__ ,eos_token=UpperCAmelCase__ ,unk_token=UpperCAmelCase__ ,sep_token=UpperCAmelCase__ ,pad_token=UpperCAmelCase__ ,cls_token=UpperCAmelCase__ ,mask_token=UpperCAmelCase__ ,additional_special_tokens=UpperCAmelCase__ ,sp_model_kwargs=self.sp_model_kwargs ,**UpperCAmelCase__ ,)
SCREAMING_SNAKE_CASE_ : Optional[Any] = 3
SCREAMING_SNAKE_CASE_ : int = do_lower_case
SCREAMING_SNAKE_CASE_ : Tuple = remove_space
SCREAMING_SNAKE_CASE_ : List[Any] = keep_accents
SCREAMING_SNAKE_CASE_ : Tuple = vocab_file
SCREAMING_SNAKE_CASE_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase__ )
@property
def snake_case ( self ):
return len(self.sp_model )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
SCREAMING_SNAKE_CASE_ : int = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : List[Any] = None
return state
def __setstate__( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE_ : Dict = {}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case ( self ,snake_case__ ):
if self.remove_space:
SCREAMING_SNAKE_CASE_ : str = ''' '''.join(inputs.strip().split() )
else:
SCREAMING_SNAKE_CASE_ : int = inputs
SCREAMING_SNAKE_CASE_ : List[Any] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
SCREAMING_SNAKE_CASE_ : Any = unicodedata.normalize('NFKD' ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = ''''''.join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] )
if self.do_lower_case:
SCREAMING_SNAKE_CASE_ : int = outputs.lower()
return outputs
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = self.preprocess_text(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = self.sp_model.encode(UpperCAmelCase__ ,out_type=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = []
for piece in pieces:
if len(UpperCAmelCase__ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
SCREAMING_SNAKE_CASE_ : List[Any] = cur_pieces[1:]
else:
SCREAMING_SNAKE_CASE_ : List[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase__ )
else:
new_pieces.append(UpperCAmelCase__ )
return new_pieces
def snake_case ( self ,snake_case__ ):
return self.sp_model.PieceToId(UpperCAmelCase__ )
def snake_case ( self ,snake_case__ ):
return self.sp_model.IdToPiece(UpperCAmelCase__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = ''''''.join(UpperCAmelCase__ ).replace(UpperCAmelCase__ ,' ' ).strip()
return out_string
def snake_case ( self ,snake_case__ ,snake_case__ = False ,snake_case__ = None ,snake_case__ = True ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('use_source_tokenizer' ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = self.convert_ids_to_tokens(UpperCAmelCase__ ,skip_special_tokens=UpperCAmelCase__ )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
SCREAMING_SNAKE_CASE_ : Dict = []
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = []
sub_texts.append(UpperCAmelCase__ )
else:
current_sub_text.append(UpperCAmelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
SCREAMING_SNAKE_CASE_ : Any = ''''''.join(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
SCREAMING_SNAKE_CASE_ : str = self.clean_up_tokenization(UpperCAmelCase__ )
return clean_text
else:
return text
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case ( self ,snake_case__ ,snake_case__ = None ,snake_case__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ ,token_ids_a=UpperCAmelCase__ ,already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1]
return ([0] * len(UpperCAmelCase__ )) + [1, 1]
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
UpperCAmelCase__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ ,'wb' ) as fi:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
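# Illustrative sketch (not part of this module): XLNet appends its special tokens
# at the *end* of the sequence, unlike BERT. For a pair (A, B) the methods above
# build `A <sep> B <sep> <cls>` with segment ids `0...0 1...1 2`. Assumes the
# `xlnet-base-cased` checkpoint is available.
from transformers import XLNetTokenizer

def _demo_special_token_layout():
    tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    ids = tok.build_inputs_with_special_tokens([10, 11], [20, 21])
    # -> [10, 11, sep_id, 20, 21, sep_id, cls_id]
    segments = tok.create_token_type_ids_from_sequences([10, 11], [20, 21])
    # -> [0, 0, 0, 1, 1, 1, 2]
    return ids, segments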
| 700 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : Dict = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'https://pypi.org/pypi/diffusers/json'
SCREAMING_SNAKE_CASE_ : Optional[int] = json.loads(request.urlopen(lowerCamelCase_ ).read() )['releases'].keys()
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : version.Version(lowerCamelCase_ ) )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(lowerCamelCase_ ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] ) -> Any:
"""simple docstring"""
init_hf_modules()
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = f.read()
# Imports of the form `import .xxx`
SCREAMING_SNAKE_CASE_ : Tuple = re.findall(r'^\s*import\s+\.(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase_ ) )
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [module_file]
SCREAMING_SNAKE_CASE_ : Tuple = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE_ : int = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ).parent
SCREAMING_SNAKE_CASE_ : int = [str(module_path / m ) for m in new_imports]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE_ : Any = [F'{f}.py' for f in new_import_files]
SCREAMING_SNAKE_CASE_ : Optional[int] = len(lowerCamelCase_ ) == 0
all_relative_imports.extend(lowerCamelCase_ )
return all_relative_imports
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] = f.read()
# Imports of the form `import xxx`
SCREAMING_SNAKE_CASE_ : List[str] = re.findall(r'^\s*import\s+(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(r'^\s*from\s+(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
SCREAMING_SNAKE_CASE_ : List[str] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(set(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : List[str] = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase_ )
except ImportError:
missing_packages.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
F'{", ".join(lowerCamelCase_ )}. Run `pip install {" ".join(lowerCamelCase_ )}`' )
return get_relative_imports(lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = module_path.replace(os.path.sep , '.' )
SCREAMING_SNAKE_CASE_ : Any = importlib.import_module(lowerCamelCase_ )
if class_name is None:
return find_pipeline_class(lowerCamelCase_ )
return getattr(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
from ..pipelines import DiffusionPipeline
SCREAMING_SNAKE_CASE_ : List[Any] = dict(inspect.getmembers(lowerCamelCase_ , inspect.isclass ) )
SCREAMING_SNAKE_CASE_ : List[str] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCamelCase_ )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
SCREAMING_SNAKE_CASE_ : Any = cls
return pipeline_class
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = module_file_or_url
SCREAMING_SNAKE_CASE_ : Dict = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
SCREAMING_SNAKE_CASE_ : List[str] = get_diffusers_versions()
# cut ".dev0"
SCREAMING_SNAKE_CASE_ : Dict = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
SCREAMING_SNAKE_CASE_ : List[Any] = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
SCREAMING_SNAKE_CASE_ : int = F'v{revision}'
elif revision == "main":
SCREAMING_SNAKE_CASE_ : List[Any] = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
SCREAMING_SNAKE_CASE_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase_ , pipeline=lowerCamelCase_ )
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = cached_download(
lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Tuple = 'git'
SCREAMING_SNAKE_CASE_ : Dict = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE_ : List[str] = hf_hub_download(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
SCREAMING_SNAKE_CASE_ : Dict = check_imports(lowerCamelCase_ )
# Now we move the module inside our cached dynamic modules.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(lowerCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification, but that seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F'{module_needed}.py'
shutil.copy(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Tuple = use_auth_token
elif use_auth_token is True:
SCREAMING_SNAKE_CASE_ : int = HfFolder.get_token()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = model_info(lowerCamelCase_ , revision=lowerCamelCase_ , token=lowerCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
SCREAMING_SNAKE_CASE_ : Any = submodule_path / commit_hash
SCREAMING_SNAKE_CASE_ : List[Any] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
# Make sure we also have every file with relative imports cached as well
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase_ , F'{module_needed}.py' , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return os.path.join(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , **lowerCamelCase_ : Dict , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_cached_module_file(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return get_class_in_module(lowerCamelCase_ , final_module.replace('.py' , '' ) )
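# Illustrative usage sketch (not part of this module). Upstream this file is
# `diffusers/utils/dynamic_modules_utils.py` and the entry point obfuscated above
# as `__UpperCAmelCase` is `get_class_from_dynamic_module`; the repo id below is
# hypothetical.
from diffusers.utils.dynamic_modules_utils import get_class_from_dynamic_module

def _demo_load_community_pipeline():
    # Fetches `pipeline.py` from the repo, caches it under HF_MODULES_CACHE,
    # imports it, and returns the single DiffusionPipeline subclass it defines.
    return get_class_from_dynamic_module(
        "some-user/my-community-pipeline",  # hypothetical repo id
        "pipeline.py",
    )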
| 685 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Any = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCAmelCase_ ( __UpperCAmelCase ):
__a : int = "realm"
def __init__( self ,snake_case__=30522 ,snake_case__=768 ,snake_case__=128 ,snake_case__=12 ,snake_case__=12 ,snake_case__=8 ,snake_case__=3072 ,snake_case__="gelu_new" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=1E-12 ,snake_case__=256 ,snake_case__=10 ,snake_case__=1E-3 ,snake_case__=5 ,snake_case__=320 ,snake_case__=13353718 ,snake_case__=5000 ,snake_case__=1 ,snake_case__=0 ,snake_case__=2 ,**snake_case__ ,):
super().__init__(pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,**_lowerCamelCase )
# Common config
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Any = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = hidden_size
SCREAMING_SNAKE_CASE_ : List[Any] = retriever_proj_size
SCREAMING_SNAKE_CASE_ : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_candidates
SCREAMING_SNAKE_CASE_ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : int = type_vocab_size
SCREAMING_SNAKE_CASE_ : str = layer_norm_eps
# Reader config
SCREAMING_SNAKE_CASE_ : int = span_hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = max_span_width
SCREAMING_SNAKE_CASE_ : Optional[int] = reader_layer_norm_eps
SCREAMING_SNAKE_CASE_ : Any = reader_beam_size
SCREAMING_SNAKE_CASE_ : int = reader_seq_len
# Retrieval config
SCREAMING_SNAKE_CASE_ : List[str] = num_block_records
SCREAMING_SNAKE_CASE_ : List[Any] = searcher_beam_size
| 701 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[int] = "visual_bert"
def __init__( self ,snake_case__=30522 ,snake_case__=768 ,snake_case__=512 ,snake_case__=12 ,snake_case__=12 ,snake_case__=3072 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=1E-12 ,snake_case__=False ,snake_case__=True ,snake_case__=1 ,snake_case__=0 ,snake_case__=2 ,**snake_case__ ,):
super().__init__(pad_token_id=snake_case__ ,bos_token_id=snake_case__ ,eos_token_id=snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = visual_embedding_dim
SCREAMING_SNAKE_CASE_ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : int = bypass_transformer
SCREAMING_SNAKE_CASE_ : Optional[Any] = special_visual_initialize
| 685 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Any = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowerCamelCase__ ):
__a : List[str] = "encoder-decoder"
__a : Any = True
def __init__( self ,**snake_case__ ):
super().__init__(**__lowerCamelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop('encoder' )
SCREAMING_SNAKE_CASE_ : Any = encoder_config.pop('model_type' )
SCREAMING_SNAKE_CASE_ : str = kwargs.pop('decoder' )
SCREAMING_SNAKE_CASE_ : Tuple = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.for_model(__lowerCamelCase ,**__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Any = AutoConfig.for_model(__lowerCamelCase ,**__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : str = True
@classmethod
def snake_case ( cls ,snake_case__ ,snake_case__ ,**snake_case__ ):
logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : List[str] = True
return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**__lowerCamelCase )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.encoder.to_dict()
SCREAMING_SNAKE_CASE_ : str = self.decoder.to_dict()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.__class__.model_type
return output
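# Illustrative usage sketch (not part of this module): composing a combined
# config from two standalone configs via the classmethod above (upstream this
# class is `EncoderDecoderConfig` and the classmethod is
# `from_encoder_decoder_configs`). Assumes the `bert-base-uncased` checkpoint.
from transformers import AutoConfig, EncoderDecoderConfig

def _demo_compose_config():
    encoder = AutoConfig.from_pretrained("bert-base-uncased")
    decoder = AutoConfig.from_pretrained("bert-base-uncased")
    config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
    # The decoder copy is flagged for causal decoding with cross-attention:
    assert config.decoder.is_decoder and config.decoder.add_cross_attention
    return config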
| 702 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Union[str, Any]:
"""simple docstring"""
def is_in_circle(lowerCamelCase_ : float , lowerCamelCase_ : float ) -> bool:
SCREAMING_SNAKE_CASE_ : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
SCREAMING_SNAKE_CASE_ : Optional[int] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase_ ) )
# The ratio of the area for circle to square is pi/4.
SCREAMING_SNAKE_CASE_ : Tuple = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Callable[[float], float] , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : float = 1.0 , ) -> float:
"""simple docstring"""
return mean(
function_to_integrate(uniform(lowerCamelCase_ , lowerCamelCase_ ) ) for _ in range(lowerCamelCase_ ) ) * (max_value - min_value)
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : float = 1.0 ) -> None:
"""simple docstring"""
def identity_function(lowerCamelCase_ : float ) -> float:
return x
SCREAMING_SNAKE_CASE_ : str = area_under_curve_estimator(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (max_value * max_value - min_value * min_value) / 2
print('******************' )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print('******************' )
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> None:
"""simple docstring"""
def function_to_integrate(lowerCamelCase_ : float ) -> float:
return sqrt(4.0 - x * x )
SCREAMING_SNAKE_CASE_ : Dict = area_under_curve_estimator(
lowerCamelCase_ , lowerCamelCase_ , 0.0 , 2.0 )
print('******************' )
print('Estimating pi using area_under_curve_estimator' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print('******************' )
if __name__ == "__main__":
import doctest
doctest.testmod()
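# Illustrative re-implementation with descriptive names (the originals above are
# obfuscated): `area_under_curve_estimator` is the mean-value Monte Carlo
# estimator  integral ~ mean(f(U)) * (b - a)  for U ~ Uniform(a, b).
from random import uniform
from statistics import mean

def monte_carlo_integral(f, a: float, b: float, n: int = 100_000) -> float:
    return mean(f(uniform(a, b)) for _ in range(n)) * (b - a)

# Example: the integral of x**2 over [0, 1] is 1/3; the estimate converges as n grows.
# monte_carlo_integral(lambda x: x * x, 0.0, 1.0)  # ~0.333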
| 685 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' ,return_dict=_lowercase ).to(_lowercase )
SCREAMING_SNAKE_CASE_ : List[str] = AutoTokenizer.from_pretrained('google/mt5-small' )
SCREAMING_SNAKE_CASE_ : str = tokenizer('Hello there' ,return_tensors='pt' ).input_ids
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer('Hi I am' ,return_tensors='pt' ).input_ids
SCREAMING_SNAKE_CASE_ : Any = model(input_ids.to(_lowercase ) ,labels=labels.to(_lowercase ) ).loss
SCREAMING_SNAKE_CASE_ : List[str] = -(labels.shape[-1] * loss.item())
SCREAMING_SNAKE_CASE_ : str = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
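# Illustrative helper (not part of the test): the model returns the *mean*
# per-token cross-entropy, so the assertion above recovers the total sequence
# log-likelihood by scaling by the number of label tokens and negating.
def sequence_log_likelihood(mean_loss: float, num_label_tokens: int) -> float:
    return -(num_label_tokens * mean_loss)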
| 703 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=7 ,snake_case__=3 ,snake_case__=18 ,snake_case__=30 ,snake_case__=400 ,snake_case__=True ,snake_case__=None ,snake_case__=True ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE_ : int = max_resolution
SCREAMING_SNAKE_CASE_ : Dict = do_resize
SCREAMING_SNAKE_CASE_ : Dict = size
SCREAMING_SNAKE_CASE_ : str = apply_ocr
def snake_case ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = LayoutLMvaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
self.assertTrue(hasattr(snake_case__ ,'apply_ocr' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,snake_case__ )
self.assertIsInstance(encoding.boxes ,snake_case__ )
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# with apply_OCR = True
SCREAMING_SNAKE_CASE_ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
SCREAMING_SNAKE_CASE_ : str = Image.open(ds[0]['file'] ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : Any = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
# with apply_OCR = False
SCREAMING_SNAKE_CASE_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
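# Illustrative usage sketch (not part of the test file; upstream the class is
# `LayoutLMv3ImageProcessor`, obfuscated here as `LayoutLMvaImageProcessor`).
# With `apply_ocr=True` (the default) Tesseract supplies words and 0-1000
# normalized boxes alongside pixel values; with `apply_ocr=False` callers
# provide them. Requires `pytesseract` and a PIL image.
from PIL import Image
from transformers import LayoutLMv3ImageProcessor

def _demo_apply_ocr(image: Image.Image):
    processor = LayoutLMv3ImageProcessor()  # apply_ocr=True by default
    encoding = processor(image, return_tensors="pt")
    # encoding.pixel_values has shape (1, 3, 224, 224); words/boxes hold OCR output.
    return encoding.words, encoding.boxes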
| 685 | 0 |
from __future__ import annotations
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any = None ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = word_bank or []
# create a table
SCREAMING_SNAKE_CASE_ : int = len(__SCREAMING_SNAKE_CASE ) + 1
SCREAMING_SNAKE_CASE_ : list[list[list[str]]] = []
for _ in range(__SCREAMING_SNAKE_CASE ):
table.append([] )
# seed value
SCREAMING_SNAKE_CASE_ : Tuple = [[]] # the empty string has exactly one construction: the empty combination
# iterate through the indices
for i in range(__SCREAMING_SNAKE_CASE ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(__SCREAMING_SNAKE_CASE )] == word:
SCREAMING_SNAKE_CASE_ : list[list[str]] = [
[word, *way] for way in table[i]
]
# add the word to every combination the current position holds,
# then push each extended combination to table[i + len(word)]
table[i + len(__SCREAMING_SNAKE_CASE )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(__SCREAMING_SNAKE_CASE )]:
combination.reverse()
return table[len(__SCREAMING_SNAKE_CASE )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
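# Illustrative companion sketch (not from the original file): the same
# tabulation, reduced to *counting* constructions instead of enumerating them,
# which avoids materializing every combination.
def count_construct(target: str, word_bank: list[str]) -> int:
    table = [0] * (len(target) + 1)
    table[0] = 1  # one way to build the empty prefix
    for i in range(len(target)):
        if table[i]:
            for word in word_bank:
                if target[i : i + len(word)] == word:
                    table[i + len(word)] += table[i]
    return table[len(target)]

# count_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"])  # number of decompositions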
| 704 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ : str = logging.getLogger(__name__)
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : str
__a : str
__a : Optional[str] = None
__a : Optional[str] = None
__a : Optional[str] = None
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : List[int]
__a : Optional[List[int]] = None
__a : Optional[List[int]] = None
__a : Optional[Union[int, float]] = None
__a : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
snake_case__ ,'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(snake_case__ ) ,snake_case__ ,) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : Dict = cached_features_file + '.lock'
with FileLock(snake_case__ ):
if os.path.exists(snake_case__ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(snake_case__ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
SCREAMING_SNAKE_CASE_ : List[Any] = (
processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
)
logger.info('Training examples: %s' ,len(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
logger.info('Saving features into cached file %s' ,snake_case__ )
torch.save(self.features ,snake_case__ )
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ :
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 128 ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list
SCREAMING_SNAKE_CASE_ : int = processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(snake_case__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
SCREAMING_SNAKE_CASE_ : List[Any] = tf.data.Dataset.from_generator(
snake_case__ ,(
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_train_set.txt' ) ) ,'train' )
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
def snake_case ( self ):
return ["contradiction", "entailment", "neutral"]
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i, line in enumerate(snake_case__ ):
if i == 0:
continue
SCREAMING_SNAKE_CASE_ : List[str] = '%s-%s' % (set_type, line[0])
SCREAMING_SNAKE_CASE_ : Dict = line[5]
SCREAMING_SNAKE_CASE_ : Dict = line[6]
SCREAMING_SNAKE_CASE_ : Tuple = line[7][2:] if line[7].startswith('ex' ) else line[7]
SCREAMING_SNAKE_CASE_ : Optional[int] = line[0]
examples.append(InputExample(guid=snake_case__ ,text_a=snake_case__ ,text_b=snake_case__ ,label=snake_case__ ,pairID=snake_case__ ) )
return examples
def __UpperCAmelCase ( lowerCamelCase_ : List[InputExample] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : PreTrainedTokenizer , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase_ )}
SCREAMING_SNAKE_CASE_ : Dict = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase_ ) , desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d' % (ex_index) )
SCREAMING_SNAKE_CASE_ : Any = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' , truncation=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = label_map[example.label] if example.label in label_map else 0
SCREAMING_SNAKE_CASE_ : List[str] = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase_ , label=lowerCamelCase_ , pairID=lowerCamelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
UpperCamelCase__ : str = {
'''hans''': 3,
}
UpperCamelCase__ : Dict = {
'''hans''': HansProcessor,
}
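# Illustrative sketch (not part of this module): converting one HANS-style
# example to features. Upstream (the research-projects `utils_hans.py`) the
# helper obfuscated above as `__UpperCAmelCase` is
# `hans_convert_examples_to_features` and the example dataclass is
# `InputExample`; both are passed in here to stay import-path agnostic.
from transformers import AutoTokenizer

def _demo_convert_one_example(convert_fn, example_cls):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    example = example_cls(
        guid="dev-1",
        text_a="The doctor saw the lawyer.",
        text_b="The lawyer saw the doctor.",
        label="entailment",
        pairID="1",
    )
    labels = ["contradiction", "entailment", "neutral"]
    return convert_fn([example], labels, 128, tokenizer)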
| 685 | 0 |