| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1 |
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picks: int = 20) -> str:
    """Project Euler 493: expected number of distinct colours seen when
    drawing ``num_picks`` balls from an urn holding 10 balls of each of
    7 colours, computed by linearity of expectation."""
    total = math.comb(NUM_BALLS, num_picks)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picks)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
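# Illustrative check (derived from the formula above, not from the original
# source): the expectation is NUM_COLOURS * (1 - C(60, 20) / C(70, 20)), so
# solution() should print a value close to 6.818741802.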
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # extend the attention mask with zeros over the yet-unused cache slots
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
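    # Both checks above exercise incremental (cached) decoding: decoding with a
    # pre-allocated key/value cache, one step at a time, must reproduce the
    # logits of a single full forward pass to within 1e-3 on the final step.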
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
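# A minimal illustration of the default mask construction above (example
# values assumed, not from the original source): with pad_token_id=1, an
# input row [5, 6, 1, 1] yields the attention mask [1, 1, 0, 0].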
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
        tgt_text = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFEfficientFormerForImageClassification,
        TFEfficientFormerForImageClassificationWithTeacher,
        TFEfficientFormerModel,
    )
    from transformers.models.efficientformer.modeling_tf_efficientformer import (
        TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_vision_available():
    from PIL import Image

    from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        num_channels: int = 3,
        embed_dim: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)

            self.assertTrue(outputs_dict is not None)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers import (
        TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
        BertConfig,
        DPRConfig,
        TFDPRContextEncoder,
        TFDPRQuestionEncoder,
        TFDPRReader,
    )
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the candidate plaintext for
    every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")
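# Illustrative run (example message assumed, not from the original source):
# decrypt("KHOOR") prints 26 candidate lines, and the line for Key #3 reads
# "HELLO", recovering the plaintext of a shift-by-3 Caesar cipher.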
def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode blocks.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True

    return False
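# Example: _is_chinese_char(ord("中")) is True, since U+4E2D falls inside the
# CJK Unified Ideographs range (0x4E00-0x9FFF) checked above.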
def is_chinese(word: str):
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            match_len = min(end - start, max_word_len)
            for i in range(match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
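# Worked example (inputs assumed for illustration): with
# bert_tokens=["你", "好", "吗"] and chinese_word_set={"你好"}, the loop marks
# the continuation piece and returns ["你", "##好", "吗"], which is what
# whole-word masking needs to treat "你好" as one unit.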
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
_a = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
_a = parser.parse_args()
main(args)
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
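# In the original Swin checkpoint the query/key/value projections are stored
# as one fused qkv matrix of shape (3 * dim, dim); the slices above split it
# into equal thirds for the separate query/key/value weights the HF model
# expects. The decoder self-attention below gets the same treatment.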
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    # load config
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy approximation of a minimum vertex cover: repeatedly pick the
    vertex with the highest remaining degree until no edges are left."""
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module, the queue will be filled like a priority queue
    # heapq works with a min priority queue, so -1 * len(v) is used to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # remove all arcs adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 19 | 0 |
def get_set_bits_count(number: int) -> int:
    """Count the number of set bits in a non-negative integer."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")
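# worked examples (added for illustration): 25 == 0b11001 has three set bits,
# 8 == 0b1000 has one
assert get_set_bits_count(25) == 3
assert get_set_bits_count(8) == 1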
if __name__ == "__main__":
import doctest
doctest.testmod() | 25 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '''__test_patch_submodule_mock__'''
    with patch_submodule(_test_patching, '''os.path.join''', mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    """simple docstring"""
    assert _test_patching.open is open
    mock = '''__test_patch_submodule_builtin_mock__'''
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, '''open''', mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    """simple docstring"""
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching, '''pandas.read_csv''', mock):
pass
def test_patch_submodule_missing_builtin():
    """simple docstring"""
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, '''len''', None) is None
    with patch_submodule(_test_patching, '''len''', mock):
assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    """simple docstring"""
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching, '''open''', mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
    """simple docstring"""
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join
    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, '''os.path.join''', mock_join):
        with patch_submodule(_test_patching, '''os.rename''', mock_rename):
            with patch_submodule(_test_patching, '''os.path.dirname''', mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, '''os.rename''', mock_rename):
        with patch_submodule(_test_patching, '''os.path.join''', mock_join):
            with patch_submodule(_test_patching, '''os.path.dirname''', mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    """simple docstring"""
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching, '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''', mock):
        pass
    with patch_submodule(_test_patching, '''os.__attribute_that_doesn_exist__''', mock):
pass
| 19 | 0 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 26 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original block number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
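# Illustrative check (my own example, not from the original script): with a
# patch-embedding offset of 1, block index 2 becomes block 1 and the layer name
# is swapped, so:
assert (
    replace_key_with_offset("poolformer.encoder.2.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
    == "poolformer.encoder.block.1.0.output.conv1.weight"
)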
def rename_keys(state_dict):
    """Renames the keys of the original PoolFormer state dict to match ours."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Prepares a dummy image on which the conversion is verified."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights into our PoolFormer structure."""
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info(f"Converting model {model_name}...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 19 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
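    # Note (added for clarity): swapping the module object for a `_LazyModule`
    # means e.g. importing `MobileViTModel` from here only pulls in the heavy
    # torch dependency on first attribute access.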
| 27 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
| 19 | 0 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """simple docstring"""
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(' ') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding='max_length' if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw)
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
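# Usage sketch (illustrative example, not from the original file): with
# pad_token_id=0, the all-pad third column is dropped while ragged content stays.
#
#     ids = torch.tensor([[5, 6, 0], [7, 0, 0]])
#     trim_batch(ids, 0)  # -> tensor([[5, 6], [7, 0]])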
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A, A, A, A, A="train", A=None, A=None, A=None, A="", ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = Path(A ).joinpath(type_path + '.source' )
SCREAMING_SNAKE_CASE : int = Path(A ).joinpath(type_path + '.target' )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_char_lens(self.src_file )
SCREAMING_SNAKE_CASE : Any = max_source_length
SCREAMING_SNAKE_CASE : str = max_target_length
assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}"
SCREAMING_SNAKE_CASE : Any = tokenizer
SCREAMING_SNAKE_CASE : Optional[Any] = prefix
if n_obs is not None:
SCREAMING_SNAKE_CASE : Any = self.src_lens[:n_obs]
SCREAMING_SNAKE_CASE : Optional[int] = src_lang
SCREAMING_SNAKE_CASE : Union[str, Any] = tgt_lang
def __len__( self ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = index + 1 # linecache starts at 1
SCREAMING_SNAKE_CASE : List[str] = self.prefix + linecache.getline(str(self.src_file ), A ).rstrip('\n' )
SCREAMING_SNAKE_CASE : Tuple = linecache.getline(str(self.tgt_file ), A ).rstrip('\n' )
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer, A ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
SCREAMING_SNAKE_CASE : Optional[Any] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer, A ) else self.tokenizer
)
SCREAMING_SNAKE_CASE : str = self.tokenizer.generator if isinstance(self.tokenizer, A ) else self.tokenizer
SCREAMING_SNAKE_CASE : int = encode_line(A, A, self.max_source_length, 'right' )
SCREAMING_SNAKE_CASE : List[str] = encode_line(A, A, self.max_target_length, 'right' )
SCREAMING_SNAKE_CASE : Tuple = source_inputs['input_ids'].squeeze()
SCREAMING_SNAKE_CASE : Dict = target_inputs['input_ids'].squeeze()
SCREAMING_SNAKE_CASE : Optional[Any] = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def UpperCamelCase_ ( A ):
'''simple docstring'''
return [len(A ) for x in Path(A ).open().readlines()]
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.stack([x['input_ids'] for x in batch] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([x['attention_mask'] for x in batch] )
SCREAMING_SNAKE_CASE : Dict = torch.stack([x['decoder_input_ids'] for x in batch] )
SCREAMING_SNAKE_CASE : int = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer, A )
else self.tokenizer.pad_token_id
)
SCREAMING_SNAKE_CASE : Tuple = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer, A )
else self.tokenizer.pad_token_id
)
SCREAMING_SNAKE_CASE : Dict = trim_batch(A, A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = trim_batch(A, A, attention_mask=A )
SCREAMING_SNAKE_CASE : List[str] = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, 'git_log.json'))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, 'w') as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
        'hostname': str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))
def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, 'wb') as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b', ' ', text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
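# e.g. normalize_answer("The  Cat!") == "cat" (lowercased, punctuation and
# articles stripped, whitespace collapsed)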
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
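# Worked example (illustrative): for "the cat sat" vs "cat sat down" the
# normalized token lists are ["cat", "sat"] and ["cat", "sat", "down"], so
# num_same == 2, precision == 1.0, recall == 2/3 and f1 == 0.8.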
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith('rag')
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info('config doesn\'t have a `{}` attribute'.format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 28 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a = logging.get_logger(__name__)
class _UpperCAmelCase( lowerCamelCase ):
    model_input_names = ['pixel_values']
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, do_rescale=True, rescale_factor=1 / 255, crop_size=None, do_normalize=True, image_mean=None, image_std=None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='''crop_size''')
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size=size['''shortest_edge'''], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            output_size = (size['''height'''], size['''width'''])
        else:
            raise ValueError(f'''Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}''')
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='''crop_size''', default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 19 | 0 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
A_ = logging.get_logger(__name__)
A_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
},
"""merges_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""Salesforce/codegen-350M-mono""": (
"""https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""Salesforce/codegen-350M-mono""": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        if kwargs.pop('''add_bos_token''', False):
            model_id = kwargs.pop('''name_or_path''', '''''')
            raise ValueError(
                '''Currently GPT2's fast tokenizer does NOT support adding a BOS token. '''
                '''Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n'''
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
                ''' so that the fast tokenizer works correctly.''')
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('''type'''))
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def decode(self, token_ids, skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, truncate_before_pattern: Optional[List[str]] = None, **kwargs) -> str:
        decoded_text = super().decode(
            token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1
        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer('''^print''', completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer('''^def''', completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
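# Typical usage of `truncate_before_pattern` (a sketch; the exact patterns are
# the caller's choice, and `generated_ids` stands in for real model output):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#     text = tokenizer.decode(generated_ids[0], truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])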
| 29 |
"""simple docstring"""
# Imports
import numpy as np
class _UpperCAmelCase:
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''')
return False
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase ( self , __a=0.08 , __a=1.22 , __a=0.03) -> Optional[Any]:
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir / self.green) - 1
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return (self.red - self.blue) / self.red
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.nir - self.green
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def UpperCAmelCase ( self , __a=0.16) -> Optional[Any]:
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase ( self , __a=0.5) -> Dict:
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def UpperCAmelCase ( self , __a=None , __a=None) -> Any:
'''simple docstring'''
return (self.nir - b) / (a * self.red)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
_UpperCamelCase = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 19 | 0 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]
    def is_too_big(text):
        return tok(text, return_tensors='''pt''').input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + ''' ''' + src
        cand_tgt = new_tgt + ''' ''' + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
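# Minimal sketch of the packing behaviour (illustrative only; `_FakeBatch` and
# `_fake_tok` are my own stand-ins for a real tokenizer, not part of this file):
#
#     import numpy as np
#     class _FakeBatch:
#         def __init__(self, n):
#             self.input_ids = np.zeros((1, n))  # mimics `input_ids.shape[1]`
#     def _fake_tok(text, return_tensors="pt"):
#         return _FakeBatch(len(text.split()))
#     src, tgt = pack_examples(_fake_tok, ["a b", "c", "d e f"], ["x", "y", "z"], max_tokens=4)
#     # -> src == ["a b c", "d e f"] and tgt == ["x y", "z"]: lines are merged
#     # greedily until a candidate exceeds max_tokens, then the pair is flushed.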
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f'''packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.''')
        Path(save_path / f'''{split}.source''').open('''w''').write('''\n'''.join(packed_src))
        Path(save_path / f'''{split}.target''').open('''w''').write('''\n'''.join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
        shutil.copyfile(src_path, save_path / f'''{split}.source''')
        shutil.copyfile(tgt_path, save_path / f'''{split}.target''')
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument('''--tok_name''', type=str, help='''like facebook/bart-large-cnn,t5-base, etc.''')
    parser.add_argument('''--max_seq_len''', type=int, default=128)
    parser.add_argument('''--data_dir''', type=str)
    parser.add_argument('''--save_path''', type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli() | 30 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=64 , __a=2 , __a=3 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a=[1, 16, 4, 4] , __a=None , ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_UpperCamelCase = (self.image_size // 32) ** 2
_UpperCamelCase = num_patches + 1
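        # e.g. with the default image_size of 64: (64 // 32) ** 2 = 4 patches, so seq_length = 5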
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__a , )
def UpperCAmelCase ( self , __a , __a , __a) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ViTHybridModel(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.type_sequence_label_size
_UpperCamelCase = ViTHybridForImageClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowercase__ = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ViTHybridModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''')
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear))
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = _config_zero_init(__a)
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(config=__a)
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
_UpperCamelCase = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = ViTHybridModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
__a)
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=__a , return_tensors='''pt''').to(__a)
# forward pass
with torch.no_grad():
_UpperCamelCase = model(**__a)
# verify the logits
_UpperCamelCase = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __a)
_UpperCamelCase = torch.tensor([-1.9090, -0.4993, -0.2389]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4))
@slow
@require_accelerate
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''')
_UpperCamelCase = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''')
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=__a , return_tensors='''pt''')
_UpperCamelCase = model(**__a)
_UpperCamelCase = outputs.logits
# model predicts one of the 1000 ImageNet classes
_UpperCamelCase = logits.argmax(-1).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''')
| 19 | 0 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any]=14 , _lowerCAmelCase : int=7 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : List[Any]=99 , _lowerCAmelCase : Dict=32 , _lowerCAmelCase : Optional[int]=5 , _lowerCAmelCase : int=4 , _lowerCAmelCase : Union[str, Any]=37 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Tuple=512 , _lowerCAmelCase : Dict=16 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : List[str]=None , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_token_type_ids
SCREAMING_SNAKE_CASE_ = use_input_mask
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = use_mc_token_ids
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = num_choices
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = self.vocab_size - 1
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_mc_token_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_ = self.get_config()
SCREAMING_SNAKE_CASE_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase_ ( self : Optional[int] ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , *_lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = CTRLModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , head_mask=_lowerCAmelCase )
model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict , *_lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = CTRLLMHeadModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , *_lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = CTRLForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowercase_ = (CTRLLMHeadModel,) if is_torch_available() else ()
lowercase_ = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ = True
lowercase_ = False
lowercase_ = False
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = CTRLModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , n_embd=37 )
def lowerCAmelCase_ ( self : int ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase_ ( self : int ):
pass
@slow
def lowerCAmelCase_ ( self : str ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = CTRLModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase_ ( self : int ):
pass
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[11_859, 0, 1_611, 8]] , dtype=torch.long , device=_lowerCAmelCase ) # Legal the president is
SCREAMING_SNAKE_CASE_ = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
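        # Note (assumed from the comments above): the hard-coded ids encode the prompt
        # "Legal the president is" under the `ctrl` tokenizer, and greedy generation
        # (do_sample=False) is expected to reproduce the continuation spelled out above.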
SCREAMING_SNAKE_CASE_ = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase )
self.assertListEqual(output_ids[0].tolist() , _lowerCAmelCase ) | 31 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['vqvae']
def __init__( self , __a , __a , __a , __a , ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__a , scheduler=__a , mel=__a , vqvae=__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
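        # a DDIM-style scheduler converges in ~50 denoising steps, while DDPM-style
        # schedulers need the full 1000 (the isinstance check below distinguishes them)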
return 50 if isinstance(self.scheduler , __a) else 10_00
@torch.no_grad()
def __call__( self , __a = 1 , __a = None , __a = None , __a = 0 , __a = 0 , __a = None , __a = None , __a = 0 , __a = 0 , __a = None , __a = 0 , __a = None , __a = None , __a=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
_UpperCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(__a)
_UpperCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
_UpperCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_UpperCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__a , device=self.device , )
_UpperCamelCase = noise
_UpperCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__a , __a)
_UpperCamelCase = self.mel.audio_slice_to_image(__a)
_UpperCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''').reshape(
(input_image.height, input_image.width))
_UpperCamelCase = (input_image / 2_55) * 2 - 1
_UpperCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
_UpperCamelCase = self.vqvae.encode(torch.unsqueeze(__a , 0)).latent_dist.sample(
generator=__a)[0]
_UpperCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_UpperCamelCase = self.scheduler.add_noise(__a , __a , self.scheduler.timesteps[start_step - 1])
_UpperCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_UpperCamelCase = int(mask_start_secs * pixels_per_second)
_UpperCamelCase = int(mask_end_secs * pixels_per_second)
_UpperCamelCase = self.scheduler.add_noise(__a , __a , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , __a):
_UpperCamelCase = self.unet(__a , __a , __a)['''sample''']
else:
_UpperCamelCase = self.unet(__a , __a)['''sample''']
if isinstance(self.scheduler , __a):
_UpperCamelCase = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , eta=__a , generator=__a , )['''prev_sample''']
else:
_UpperCamelCase = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , generator=__a , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_UpperCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_UpperCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_UpperCamelCase = 1 / self.vqvae.config.scaling_factor * images
_UpperCamelCase = self.vqvae.decode(__a)['''sample''']
_UpperCamelCase = (images / 2 + 0.5).clamp(0 , 1)
_UpperCamelCase = images.cpu().permute(0 , 2 , 3 , 1).numpy()
_UpperCamelCase = (images * 2_55).round().astype('''uint8''')
_UpperCamelCase = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''').convert('''L''') for _ in images))
_UpperCamelCase = [self.mel.image_to_audio(__a) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__a)[:, np.newaxis, :]) , **ImagePipelineOutput(__a))
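    # A minimal sketch of driving this pipeline (the checkpoint id is hypothetical):
    #   pipe = DiffusionPipeline.from_pretrained("user/audio-diffusion-256")
    #   images, (sample_rate, audios) = pipe(batch_size=1, return_dict=False)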
@torch.no_grad()
def UpperCAmelCase ( self , __a , __a = 50) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , __a)
self.scheduler.set_timesteps(__a)
_UpperCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
_UpperCamelCase = (sample / 2_55) * 2 - 1
_UpperCamelCase = torch.Tensor(__a).to(self.device)
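        # reverse DDIM: walk the schedule backwards, re-deriving a noisier sample from
        # the model's noise prediction at each step (the inverse of the sampling update)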
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
_UpperCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_UpperCamelCase = self.scheduler.alphas_cumprod[t]
_UpperCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_UpperCamelCase = 1 - alpha_prod_t
_UpperCamelCase = self.unet(__a , __a)['''sample''']
_UpperCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_UpperCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_UpperCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def UpperCAmelCase ( xa , xb , alpha) -> torch.Tensor:
        '''simple docstring'''
        # spherical linear interpolation (slerp) between two tensors
        theta = acos(torch.dot(torch.flatten(xa) , torch.flatten(xb)) / torch.norm(xa) / torch.norm(xb))
        return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)
| 19 | 0 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
_UpperCAmelCase = get_activation('''swish''' )
self.assertIsInstance(_UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase( self ):
_UpperCAmelCase = get_activation('''silu''' )
self.assertIsInstance(_UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase( self ):
_UpperCAmelCase = get_activation('''mish''' )
self.assertIsInstance(_UpperCamelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase( self ):
_UpperCAmelCase = get_activation('''gelu''' )
self.assertIsInstance(_UpperCamelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) | 32 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
_a = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'detr'
lowercase__ = ['past_key_values']
lowercase__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __a=True , __a=None , __a=3 , __a=1_00 , __a=6 , __a=20_48 , __a=8 , __a=6 , __a=20_48 , __a=8 , __a=0.0 , __a=0.0 , __a=True , __a="relu" , __a=2_56 , __a=0.1 , __a=0.0 , __a=0.0 , __a=0.02 , __a=1.0 , __a=False , __a="sine" , __a="resnet50" , __a=True , __a=False , __a=1 , __a=5 , __a=2 , __a=1 , __a=1 , __a=5 , __a=2 , __a=0.1 , **__a , ) -> int:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(__a , __a):
_UpperCamelCase = backbone_config.get('''model_type''')
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(__a)
# set timm attributes to None
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None, None, None
_UpperCamelCase = use_timm_backbone
_UpperCamelCase = backbone_config
_UpperCamelCase = num_channels
_UpperCamelCase = num_queries
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = encoder_layers
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
_UpperCamelCase = backbone
_UpperCamelCase = use_pretrained_backbone
_UpperCamelCase = dilation
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=__a , **__a)
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.d_model
@classmethod
def UpperCAmelCase ( cls , __a , **__a) -> int:
'''simple docstring'''
return cls(backbone_config=__a , **__a)
def UpperCAmelCase ( self) -> Dict[str, any]:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
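# A minimal usage sketch for the config above (the public name `DetrConfig` is an
# assumption based on model_type = "detr"; the values are illustrative):
#   config = DetrConfig(num_queries=100, d_model=256)
#   config_dict = config.to_dict()  # the nested backbone config is expanded too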
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = version.parse('1.11' )
@property
def UpperCAmelCase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
def UpperCAmelCase ( self) -> float:
'''simple docstring'''
return 1e-5
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return 12
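    # the two properties above supply the ONNX validation tolerance (atol = 1e-5) and
    # the default ONNX opset version (12) expected by the OnnxConfig interface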
| 19 | 0 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' ,snake_case_ ,)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Tuple = RobertaConfig
__lowercase : List[str] = 'roberta'
def __init__( self:List[str] , _a:Union[str, Any] ):
super().__init__(_a )
snake_case__ = RobertaEmbeddings(_a )
self.init_weights()
@add_start_docstrings(
'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' ,snake_case_ ,)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[str] = RobertaConfig
__lowercase : int = 'roberta'
def __init__( self:str , _a:Optional[Any] ):
super().__init__(_a )
snake_case__ = config.num_labels
snake_case__ = config.num_hidden_layers
snake_case__ = DeeRobertaModel(_a )
snake_case__ = nn.Dropout(config.hidden_dropout_prob )
snake_case__ = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_a )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:Any=None , _a:Any=None , _a:str=None , _a:Optional[Any]=None , _a:Union[str, Any]=None , _a:Optional[Any]=None , _a:Dict=None , _a:str=-1 , _a:Optional[int]=False , ):
snake_case__ = self.num_layers
try:
snake_case__ = self.roberta(
_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , )
snake_case__ = outputs[1]
snake_case__ = self.dropout(_a )
snake_case__ = self.classifier(_a )
snake_case__ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case__ = e.message
snake_case__ = e.exit_layer
snake_case__ = outputs[0]
if not self.training:
snake_case__ = entropy(_a )
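            # at inference time, record the entropy of the final-layer logits so it can
            # be compared against the per-layer highway (early-exit) entropies below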
snake_case__ = []
snake_case__ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case__ = MSELoss()
snake_case__ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case__ = CrossEntropyLoss()
snake_case__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case__ = []
for highway_exit in outputs[-1]:
snake_case__ = highway_exit[0]
if not self.training:
highway_logits_all.append(_a )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case__ = MSELoss()
snake_case__ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case__ = CrossEntropyLoss()
snake_case__ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_a )
if train_highway:
snake_case__ = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case__ = (loss,) + outputs
if not self.training:
snake_case__ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case__ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
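# Rough usage sketch (class names follow the upstream DeeRoBERTa example code, since
# the class names above are placeholders):
#   model = DeeRobertaForSequenceClassification.from_pretrained("roberta-base")
#   outputs = model(input_ids, labels=labels, train_highway=True)
#   loss = outputs[0]  # sum of the intermediate highway losses when training highways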
| 33 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'wavlm'
def __init__( self , __a=32 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.1 , __a=0.02 , __a=1e-5 , __a="group" , __a="gelu" , __a=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __a=(5, 2, 2, 2, 2, 2, 2) , __a=(10, 3, 3, 3, 3, 2, 2) , __a=False , __a=1_28 , __a=16 , __a=3_20 , __a=8_00 , __a=False , __a=True , __a=0.05 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=3_20 , __a=2 , __a=0.1 , __a=1_00 , __a=2_56 , __a=2_56 , __a=0.1 , __a="mean" , __a=False , __a=False , __a=2_56 , __a=(5_12, 5_12, 5_12, 5_12, 15_00) , __a=(5, 3, 3, 1, 1) , __a=(1, 2, 3, 1, 1) , __a=5_12 , __a=80 , __a=0 , __a=1 , __a=2 , __a=False , __a=3 , __a=2 , __a=3 , __a=None , **__a , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a)
_UpperCamelCase = hidden_size
_UpperCamelCase = feat_extract_norm
_UpperCamelCase = feat_extract_activation
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = conv_bias
_UpperCamelCase = num_buckets
_UpperCamelCase = max_bucket_distance
_UpperCamelCase = num_conv_pos_embeddings
_UpperCamelCase = num_conv_pos_embedding_groups
_UpperCamelCase = len(self.conv_dim)
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = feat_proj_dropout
_UpperCamelCase = final_dropout
_UpperCamelCase = layerdrop
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
_UpperCamelCase = num_ctc_classes
_UpperCamelCase = vocab_size
_UpperCamelCase = do_stable_layer_norm
_UpperCamelCase = use_weighted_layer_sum
_UpperCamelCase = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase = apply_spec_augment
_UpperCamelCase = mask_time_prob
_UpperCamelCase = mask_time_length
_UpperCamelCase = mask_time_min_masks
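        # with the defaults above (prob 0.05, span length 10), each frame has a 5% chance
        # of starting a mask span, so up to roughly half of all frames can end up masked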
_UpperCamelCase = mask_feature_prob
_UpperCamelCase = mask_feature_length
# parameters for pretraining with codevector quantized representations
_UpperCamelCase = num_codevectors_per_group
_UpperCamelCase = num_codevector_groups
_UpperCamelCase = contrastive_logits_temperature
_UpperCamelCase = num_negatives
_UpperCamelCase = codevector_dim
_UpperCamelCase = proj_codevector_dim
_UpperCamelCase = diversity_loss_weight
# ctc loss
_UpperCamelCase = ctc_loss_reduction
_UpperCamelCase = ctc_zero_infinity
# adapter
_UpperCamelCase = add_adapter
_UpperCamelCase = adapter_kernel_size
_UpperCamelCase = adapter_stride
_UpperCamelCase = num_adapter_layers
_UpperCamelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = xvector_output_dim
@property
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
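        # overall temporal downsampling factor of the conv feature encoder; with the
        # default strides (5, 2, 2, 2, 2, 2, 2) this is 5 * 2**6 = 320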
return functools.reduce(operator.mul , self.conv_stride , 1)
| 19 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_)
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_)).convert('''RGB''').resize((6_4, 6_4))
UpperCamelCase = Image.fromarray(np.uinta(image + 4)).convert('''RGB''').resize((6_4, 6_4))
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionInpaintPipeline(**lowerCamelCase_)
UpperCamelCase = sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_)
UpperCamelCase = sd_pipe(**lowerCamelCase_).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9e-3
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def UpperCAmelCase__ ( self) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = PNDMScheduler.from_pretrained(lowerCamelCase_ , subfolder='''scheduler''')
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , safety_checker=lowerCamelCase_ , scheduler=lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
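        # attention slicing plus sequential CPU offload trades throughput for a much
        # smaller peak VRAM footprint, which the max_memory_allocated check below verifies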
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9 | 34 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_a = """bart"""
_a = True
@st.cache(allow_output_mutation=__snake_case )
def lowerCamelCase__ ( ) -> Dict:
"""simple docstring"""
if LOAD_DENSE_INDEX:
_UpperCamelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCamelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCamelCase = qar_model.eval()
else:
_UpperCamelCase , _UpperCamelCase = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCamelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCamelCase = sas_model.eval()
else:
_UpperCamelCase , _UpperCamelCase = make_qa_sas_model(
model_name='''t5-small''', from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''', device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__snake_case )
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
_UpperCamelCase = faiss.StandardGpuResources()
_UpperCamelCase = datasets.load_dataset(path='''wiki_snippets''', name='''wiki40b_en_100_0''' )['''train''']
_UpperCamelCase = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''', dtype='''float32''', mode='''r''', shape=(wikiaab_passages.num_rows, 1_28), )
_UpperCamelCase = faiss.IndexFlatIP(1_28 )
_UpperCamelCase = faiss.index_cpu_to_gpu(__snake_case, 1, __snake_case )
wikiaab_gpu_index_flat.add(__snake_case ) # TODO fix for larger GPU
else:
_UpperCamelCase , _UpperCamelCase = (None, None)
_UpperCamelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__snake_case )
def lowerCamelCase__ ( ) -> int:
"""simple docstring"""
_UpperCamelCase = datasets.load_dataset('''eli5''', name='''LFQA_reddit''' )
_UpperCamelCase = elia['''train_eli5''']
_UpperCamelCase = np.memmap(
'''eli5_questions_reps.dat''', dtype='''float32''', mode='''r''', shape=(elia_train.num_rows, 1_28) )
_UpperCamelCase = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(__snake_case )
return (elia_train, eli5_train_q_index)
_a , _a , _a = load_indexes()
_a , _a , _a , _a = load_models()
_a , _a = load_train_data()
def lowerCamelCase__ ( __snake_case, __snake_case=10 ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = embed_questions_for_retrieval([question], __snake_case, __snake_case )
_UpperCamelCase , _UpperCamelCase = eli5_train_q_index.search(__snake_case, __snake_case )
    _UpperCamelCase = [elia_train[int(i )] for i in I[0]]
return nn_examples
def lowerCamelCase__ ( __snake_case, __snake_case="wiki40b", __snake_case="dense", __snake_case=10 ) -> List[str]:
"""simple docstring"""
if source == "none":
_UpperCamelCase , _UpperCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase , _UpperCamelCase = query_qa_dense_index(
__snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
else:
_UpperCamelCase , _UpperCamelCase = query_es_index(
__snake_case, __snake_case, index_name='''english_wiki40b_snippets_100w''', n_results=__snake_case, )
_UpperCamelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase = '''question: {} context: {}'''.format(__snake_case, __snake_case )
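    # the generator consumes a single flat string of the form
    # "question: <question> context: <P> passage 1 <P> passage 2 ..."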
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __snake_case : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __snake_case : None),
} )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=64, __snake_case=2_56, __snake_case=False, __snake_case=2, __snake_case=0.95, __snake_case=0.8 ) -> Dict:
"""simple docstring"""
with torch.no_grad():
_UpperCamelCase = qa_sas_generate(
__snake_case, __snake_case, __snake_case, num_answers=1, num_beams=__snake_case, min_len=__snake_case, max_len=__snake_case, do_sample=__snake_case, temp=__snake_case, top_p=__snake_case, top_k=__snake_case, max_input_length=10_24, device='''cuda:0''', )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_a = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_a = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_a = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_a = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_a = st.sidebar.checkbox("""Demo options""")
if demo_options:
_a = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_a = action_list.index(action_st)
_a = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_a = show_type == """Show full text of passages"""
else:
_a = 3
_a = True
_a = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_a = """
### Information retriever options
The **sparse** retriever uses Elasticsearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_a = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_a = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_a = """wiki40b"""
_a = """dense"""
_a = """beam"""
_a = 2
_a = 64
_a = 256
_a = None
_a = None
_a = st.sidebar.checkbox("""Generation options""")
if generate_options:
_a = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for deterministic decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_a = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_a = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_a = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_a = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_a = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_a = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_a = None
# start main text
_a = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_a = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_a = st.text_input("""Enter your question here:""", """""")
else:
_a = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_a , _a = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_a , _a = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_a = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_a = support_list[:10]
_a = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_a , _a = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_a , _a = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_a = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_a = res[1].strip()
if sec_titles == "":
_a = """[{}]({})""".format(res[0], wiki_url)
else:
_a = sec_titles.split(""" & """)
_a = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_a = find_nearest_training(question)
_a = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_a = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_a = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 19 | 0 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowercase ( unittest.TestCase ):
lowerCamelCase : Any = MODEL_FOR_CAUSAL_LM_MAPPING
lowerCamelCase : Optional[Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE__ : List[str] = text_generator('''This is a test''' , do_sample=_lowercase )
self.assertEqual(
_lowercase , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
SCREAMING_SNAKE_CASE__ : List[Any] = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_lowercase , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
SCREAMING_SNAKE_CASE__ : List[str] = text_generator('''This is a test''' , do_sample=_lowercase , num_return_sequences=2 , return_tensors=_lowercase )
self.assertEqual(
_lowercase , [
{'''generated_token_ids''': ANY(_lowercase )},
{'''generated_token_ids''': ANY(_lowercase )},
] , )
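# Batched generation needs a pad token: the test below reuses the model's
# EOS id as the pad id so prompts of different lengths can be padded together.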
SCREAMING_SNAKE_CASE__ : Optional[int] = text_generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = '''<pad>'''
SCREAMING_SNAKE_CASE__ : Tuple = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_lowercase , num_return_sequences=2 , batch_size=2 , return_tensors=_lowercase , )
self.assertEqual(
_lowercase , [
[
{'''generated_token_ids''': ANY(_lowercase )},
{'''generated_token_ids''': ANY(_lowercase )},
],
[
{'''generated_token_ids''': ANY(_lowercase )},
{'''generated_token_ids''': ANY(_lowercase )},
],
] , )
@require_tf
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE__ : Optional[int] = text_generator('''This is a test''' , do_sample=_lowercase )
self.assertEqual(
_lowercase , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
SCREAMING_SNAKE_CASE__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_lowercase )
self.assertEqual(
_lowercase , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def lowercase__ ( self : Dict , _lowercase : int , _lowercase : Tuple , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : Tuple = TextGenerationPipeline(model=_lowercase , tokenizer=_lowercase )
return text_generator, ["This is a test", "Another test"]
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''Hello I believe in'''
SCREAMING_SNAKE_CASE__ : int = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_generator(_lowercase )
self.assertEqual(
_lowercase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
SCREAMING_SNAKE_CASE__ : str = text_generator(_lowercase , stop_sequence=''' fe''' )
self.assertEqual(_lowercase , [{'''generated_text''': '''Hello I believe in fe'''}] )
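# `stop_sequence` halts generation at the first occurrence of the given
# string; internally it is encoded to token ids and used as a stopping
# criterion (a warning is emitted if it spans more than one token).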
def lowercase__ ( self : Union[str, Any] , _lowercase : Optional[int] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = text_generator.model
SCREAMING_SNAKE_CASE__ : Tuple = text_generator.tokenizer
SCREAMING_SNAKE_CASE__ : Any = text_generator('''This is a test''' )
self.assertEqual(_lowercase , [{'''generated_text''': ANY(_lowercase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_generator('''This is a test''' , return_full_text=_lowercase )
self.assertEqual(_lowercase , [{'''generated_text''': ANY(_lowercase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
SCREAMING_SNAKE_CASE__ : Tuple = pipeline(task='''text-generation''' , model=_lowercase , tokenizer=_lowercase , return_full_text=_lowercase )
SCREAMING_SNAKE_CASE__ : int = text_generator('''This is a test''' )
self.assertEqual(_lowercase , [{'''generated_text''': ANY(_lowercase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_generator('''This is a test''' , return_full_text=_lowercase )
self.assertEqual(_lowercase , [{'''generated_text''': ANY(_lowercase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
SCREAMING_SNAKE_CASE__ : Tuple = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_lowercase )
self.assertEqual(
_lowercase , [
[{'''generated_text''': ANY(_lowercase )}, {'''generated_text''': ANY(_lowercase )}],
[{'''generated_text''': ANY(_lowercase )}, {'''generated_text''': ANY(_lowercase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_lowercase )
self.assertEqual(
_lowercase , [
[{'''generated_text''': ANY(_lowercase )}, {'''generated_text''': ANY(_lowercase )}],
[{'''generated_text''': ANY(_lowercase )}, {'''generated_text''': ANY(_lowercase )}],
] , )
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] = text_generator('''test''' , return_full_text=_lowercase , return_text=_lowercase )
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = text_generator('''test''' , return_full_text=_lowercase , return_tensors=_lowercase )
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] = text_generator('''test''' , return_text=_lowercase , return_tensors=_lowercase )
# An empty prompt is slightly special:
# it requires a BOS token to exist.
# Special case for Pegasus, which always appends EOS and therefore
# works even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_generator('''''' )
self.assertEqual(_lowercase , [{'''generated_text''': ANY(_lowercase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
SCREAMING_SNAKE_CASE__ : int = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it is impossible
# to control long generation with only max_length without
# fancy calculation, so these tests are skipped for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_00_00
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 5_00 , max_new_tokens=20 )
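# handle_long_generation="hole" truncates the prompt from the left so that
# prompt + max_new_tokens still fits within the model's context window.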
SCREAMING_SNAKE_CASE__ : Any = text_generator('''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=20 )
# The hole strategy cannot work when max_new_tokens exceeds the model's maximum length
with self.assertRaises(_lowercase ):
text_generator(
'''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def lowercase__ ( self : str ):
import torch
# Classic `model_kwargs`
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
SCREAMING_SNAKE_CASE__ : int = pipe('''This is a test''' )
self.assertEqual(
_lowercase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they are simply forwarded to the model, as they're unlikely to mean anything else.)
SCREAMING_SNAKE_CASE__ : int = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe('''This is a test''' )
self.assertEqual(
_lowercase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
SCREAMING_SNAKE_CASE__ : List[Any] = pipe('''This is a test''' )
self.assertEqual(
_lowercase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def lowercase__ ( self : Optional[int] ):
import torch
SCREAMING_SNAKE_CASE__ : Tuple = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def lowercase__ ( self : Any ):
import torch
SCREAMING_SNAKE_CASE__ : List[Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=_lowercase , top_p=0.5 )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''Hello world'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger('''transformers.generation.tf_utils''' )
else:
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger('''transformers.generation.utils''' )
SCREAMING_SNAKE_CASE__ : Tuple = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_lowercase ) as cl:
SCREAMING_SNAKE_CASE__ : Dict = text_generator(_lowercase , max_length=10 , max_new_tokens=1 )
self.assertIn(_lowercase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_lowercase ) as cl:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = text_generator(_lowercase , max_new_tokens=1 )
self.assertNotIn(_lowercase , cl.out )
with CaptureLogger(_lowercase ) as cl:
SCREAMING_SNAKE_CASE__ : Tuple = text_generator(_lowercase , max_length=10 )
self.assertNotIn(_lowercase , cl.out )
| 35 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
_a = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
_a = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
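# The dict above maps fairseq parameter names to their HF counterparts; a "*"
# in a target name is replaced later by the layer index parsed from the
# fairseq key. The list holds keys whose targets live at the top level of the
# HF model rather than under the wav2vec2 encoder.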
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> Tuple:
"""simple docstring"""
for attribute in key.split('''.''' ):
_UpperCamelCase = getattr(__snake_case, __snake_case )
if weight_type is not None:
_UpperCamelCase = getattr(__snake_case, __snake_case ).shape
else:
_UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_UpperCamelCase = value
elif weight_type == "weight_g":
_UpperCamelCase = value
elif weight_type == "weight_v":
_UpperCamelCase = value
elif weight_type == "bias":
_UpperCamelCase = value
else:
_UpperCamelCase = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = fairseq_model.state_dict()
_UpperCamelCase = hf_model.feature_extractor
_UpperCamelCase = hf_model.adapter
for name, value in fairseq_dict.items():
_UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__snake_case, __snake_case, __snake_case, __snake_case, hf_model.config.feat_extract_norm == '''group''', )
_UpperCamelCase = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(__snake_case, __snake_case, __snake_case, __snake_case )
_UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_UpperCamelCase = True
if "*" in mapped_key:
_UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2]
_UpperCamelCase = mapped_key.replace('''*''', __snake_case )
if "weight_g" in name:
_UpperCamelCase = '''weight_g'''
elif "weight_v" in name:
_UpperCamelCase = '''weight_v'''
elif "bias" in name:
_UpperCamelCase = '''bias'''
elif "weight" in name:
_UpperCamelCase = '''weight'''
else:
_UpperCamelCase = None
set_recursively(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = full_name.split('''conv_layers.''' )[-1]
_UpperCamelCase = name.split('''.''' )
_UpperCamelCase = int(items[0] )
_UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_UpperCamelCase = value
logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__snake_case )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case ) -> Dict:
"""simple docstring"""
_UpperCamelCase = full_name.split('''adaptor.''' )[-1]
_UpperCamelCase = name.split('''.''' )
if items[1].isdigit():
_UpperCamelCase = int(items[1] )
else:
_UpperCamelCase = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
_UpperCamelCase = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(__snake_case, __snake_case ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(__snake_case )
def lowerCamelCase__ ( __snake_case ) -> str:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = emb.weight.shape
_UpperCamelCase = nn.Linear(__snake_case, __snake_case, bias=__snake_case )
_UpperCamelCase = emb.weight.data
return lin_layer
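# The helper above converts an embedding matrix into a bias-free linear layer,
# effectively tying the decoder's output projection to its input embeddings.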
@torch.no_grad()
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = WavaVecaConfig.from_pretrained(
__snake_case, add_adapter=__snake_case, adapter_stride=__snake_case, adapter_kernel_size=__snake_case, use_auth_token=__snake_case, output_hidden_size=__snake_case, )
_UpperCamelCase = MBartConfig.from_pretrained(__snake_case )
# load model
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
}, )
_UpperCamelCase = model[0].eval()
# load feature extractor
_UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(__snake_case, use_auth_token=__snake_case )
# set weights for wav2vec2 encoder
_UpperCamelCase = WavaVecaModel(__snake_case )
recursively_load_weights_wavaveca(model.encoder, __snake_case )
# load decoder weights
_UpperCamelCase = MBartForCausalLM(__snake_case )
_UpperCamelCase , _UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=__snake_case )
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
_UpperCamelCase = SpeechEncoderDecoderModel(encoder=__snake_case, decoder=__snake_case )
_UpperCamelCase = False
_UpperCamelCase = MBartaaTokenizer(__snake_case )
tokenizer.save_pretrained(__snake_case )
_UpperCamelCase = hf_wavavec.config.to_dict()
_UpperCamelCase = tokenizer.pad_token_id
_UpperCamelCase = tokenizer.bos_token_id
_UpperCamelCase = tokenizer.eos_token_id
_UpperCamelCase = '''mbart50'''
_UpperCamelCase = '''wav2vec2'''
_UpperCamelCase = tokenizer.eos_token_id
_UpperCamelCase = 25_00_04
_UpperCamelCase = tokenizer.eos_token_id
_UpperCamelCase = SpeechEncoderDecoderConfig.from_dict(__snake_case )
hf_wavavec.save_pretrained(__snake_case )
feature_extractor.save_pretrained(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=25_0004, type=int, help="""`decoder_start_token_id` of model config""")
_a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 19 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowercase : Dict = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[Any] = ['''DeiTFeatureExtractor''']
__lowercase : Tuple = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Dict = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__lowercase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
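# Usage sketch: with _LazyModule, `from transformers import DeiTModel` defers
# the heavy torch import until the attribute is actually resolved, keeping a
# bare `import transformers` cheap.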
| 36 |
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case=None, **__snake_case ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = [x.strip() for x in open(__snake_case ).readlines()]
_UpperCamelCase = [x.strip() for x in open(__snake_case ).readlines()][: len(__snake_case )]
_UpperCamelCase = calculate_rouge(__snake_case, __snake_case, **__snake_case )
if save_path is not None:
save_json(__snake_case, __snake_case, indent=__snake_case )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
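# Example CLI usage via python-fire (a sketch; the script and file names are
# hypothetical):
#   python calculate_rouge.py predictions.txt references.txt --save_path=metrics.json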
| 19 | 0 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {
"""tensor(bool)""": np.bool_,
"""tensor(int8)""": np.inta,
"""tensor(uint8)""": np.uinta,
"""tensor(int16)""": np.intaa,
"""tensor(uint16)""": np.uintaa,
"""tensor(int32)""": np.intaa,
"""tensor(uint32)""": np.uintaa,
"""tensor(int64)""": np.intaa,
"""tensor(uint64)""": np.uintaa,
"""tensor(float16)""": np.floataa,
"""tensor(float)""": np.floataa,
"""tensor(double)""": np.floataa,
}
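# The map above translates ONNX Runtime tensor type strings into the matching
# numpy dtypes used when preparing session inputs and reading outputs.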
class A__ :
"""simple docstring"""
def __init__( self : str , lowerCamelCase__ : int=None , **lowerCamelCase__ : Tuple ):
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
a__ : Optional[int] = model
a__ : str = kwargs.get("model_save_dir" , lowerCamelCase__ )
a__ : List[str] = kwargs.get("latest_model_name" , lowerCamelCase__ )
def __call__( self : Tuple , **lowerCamelCase__ : Tuple ):
a__ : Union[str, Any] = {k: np.array(lowerCamelCase__ ) for k, v in kwargs.items()}
return self.model.run(lowerCamelCase__ , lowerCamelCase__ )
@staticmethod
def _UpperCamelCase( lowerCamelCase__ : Union[str, Path] , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Optional[Any]=None ):
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
a__ : List[Any] = "CPUExecutionProvider"
return ort.InferenceSession(lowerCamelCase__ , providers=[provider] , sess_options=lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Union[str, Path] , lowerCamelCase__ : Optional[str] = None , **lowerCamelCase__ : Tuple ):
a__ : Union[str, Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
a__ : Union[str, Any] = self.model_save_dir.joinpath(self.latest_model_name )
a__ : Optional[int] = Path(lowerCamelCase__ ).joinpath(lowerCamelCase__ )
try:
shutil.copyfile(lowerCamelCase__ , lowerCamelCase__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
a__ : Optional[int] = self.model_save_dir.joinpath(lowerCamelCase__ )
if src_path.exists():
a__ : Union[str, Any] = Path(lowerCamelCase__ ).joinpath(lowerCamelCase__ )
try:
shutil.copyfile(lowerCamelCase__ , lowerCamelCase__ )
except shutil.SameFileError:
pass
def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Union[str, os.PathLike] , **lowerCamelCase__ : Optional[Any] , ):
if os.path.isfile(lowerCamelCase__ ):
logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
# saving model weights/files
self._save_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
@classmethod
def _UpperCamelCase( cls : List[Any] , lowerCamelCase__ : Union[str, Path] , lowerCamelCase__ : Optional[Union[bool, str, None]] = None , lowerCamelCase__ : Optional[Union[str, None]] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional["ort.SessionOptions"] = None , **lowerCamelCase__ : Union[str, Any] , ):
a__ : int = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowerCamelCase__ ):
a__ : Optional[Any] = OnnxRuntimeModel.load_model(
os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , provider=lowerCamelCase__ , sess_options=lowerCamelCase__ )
a__ : List[Any] = Path(lowerCamelCase__ )
# load model from hub
else:
# download model
a__ : Any = hf_hub_download(
repo_id=lowerCamelCase__ , filename=lowerCamelCase__ , use_auth_token=lowerCamelCase__ , revision=lowerCamelCase__ , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , )
a__ : Optional[int] = Path(lowerCamelCase__ ).parent
a__ : Union[str, Any] = Path(lowerCamelCase__ ).name
a__ : Union[str, Any] = OnnxRuntimeModel.load_model(lowerCamelCase__ , provider=lowerCamelCase__ , sess_options=lowerCamelCase__ )
return cls(model=lowerCamelCase__ , **lowerCamelCase__ )
@classmethod
def _UpperCamelCase( cls : Dict , lowerCamelCase__ : Union[str, Path] , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = None , **lowerCamelCase__ : List[str] , ):
a__ : List[str] = None
if len(str(lowerCamelCase__ ).split("@" ) ) == 2:
a__, a__ : Optional[Any] = model_id.split("@" )
return cls._from_pretrained(
model_id=lowerCamelCase__ , revision=lowerCamelCase__ , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , use_auth_token=lowerCamelCase__ , **lowerCamelCase__ , )
| 37 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['image_processor', 'tokenizer']
lowercase__ = 'ViTImageProcessor'
lowercase__ = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __a=None , __a=None , **__a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __a , )
_UpperCamelCase = kwargs.pop('''feature_extractor''')
_UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''')
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''')
super().__init__(__a , __a)
def __call__( self , __a=None , __a=None , __a=None , __a=None , **__a) -> Tuple:
'''simple docstring'''
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''')
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''')
if text is not None:
_UpperCamelCase = self.tokenizer(__a , return_tensors=__a , **__a)
if visual_prompt is not None:
_UpperCamelCase = self.image_processor(__a , return_tensors=__a , **__a)
if images is not None:
_UpperCamelCase = self.image_processor(__a , return_tensors=__a , **__a)
if visual_prompt is not None and images is not None:
_UpperCamelCase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
_UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
_UpperCamelCase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**__a) , tensor_type=__a)
def UpperCAmelCase ( self , *__a , **__a) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*__a , **__a)
def UpperCAmelCase ( self , *__a , **__a) -> List[str]:
'''simple docstring'''
return self.tokenizer.decode(*__a , **__a)
@property
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __a , )
return self.image_processor_class
@property
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __a , )
return self.image_processor
| 19 | 0 |
'''simple docstring'''
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> Tuple:
'''simple docstring'''
if not head:
return True
# split the list to two parts
snake_case__ , snake_case__ : Dict = head.next, head
while fast and fast.next:
snake_case__ : Any = fast.next.next
snake_case__ : int = slow.next
snake_case__ : Dict = slow.next
snake_case__ : List[str] = None # Cut the first half off here (the check still works without this, but it keeps the halves separate)
# reverse the second part
snake_case__ : Tuple = None
while second:
snake_case__ : Tuple = second.next
snake_case__ : Any = node
snake_case__ : str = second
snake_case__ : Optional[Any] = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
snake_case__ : List[Any] = node.next
snake_case__ : int = head.next
return True
def UpperCamelCase__ ( __magic_name__ : Any ) -> Optional[Any]:
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
snake_case__ : List[Any] = head
while fast and fast.next:
snake_case__ , snake_case__ : Any = fast.next.next, slow.next
# 2. Push the second half into the stack
snake_case__ : Tuple = [slow.val]
while slow.next:
snake_case__ : Optional[Any] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
snake_case__ : str = cur.next
return True
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
if not head or not head.next:
return True
snake_case__ : int = {}
snake_case__ : Union[str, Any] = 0
while head:
if head.val in d:
d[head.val].append(__magic_name__ )
else:
snake_case__ : Tuple = [pos]
snake_case__ : Optional[Any] = head.next
pos += 1
snake_case__ : int = pos - 1
snake_case__ : str = 0
for v in d.values():
if len(__magic_name__ ) % 2 != 0:
middle += 1
else:
snake_case__ : List[str] = 0
for i in range(0 , len(__magic_name__ ) ):
if v[i] + v[len(__magic_name__ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
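# A minimal usage sketch for the three palindrome checks above (assumes a
# simple singly-linked node class, which this file does not define):
#
#   class ListNode:
#       def __init__(self, val):
#           self.val, self.next = val, None
#
#   head = ListNode(1); head.next = ListNode(2); head.next.next = ListNode(1)
#   # each variant returns True for this list and False for 1 -> 2 -> 3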
| 38 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=32 , __a=3 , __a=4 , __a=[10, 20, 30, 40] , __a=[2, 2, 3, 2] , __a=True , __a=True , __a=37 , __a="gelu" , __a=10 , __a=0.02 , __a=["stage2", "stage3", "stage4"] , __a=3 , __a=None , ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = num_channels
_UpperCamelCase = num_stages
_UpperCamelCase = hidden_sizes
_UpperCamelCase = depths
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = out_features
_UpperCamelCase = num_labels
_UpperCamelCase = scope
_UpperCamelCase = num_stages
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__a , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=__a , loss_ignore_index=2_55 , num_labels=self.num_labels , )
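# The config above pairs a ConvNeXt backbone (exposing stages 2-4 as feature
# maps) with UperNet's pyramid-pooling decode head and an auxiliary FCN head.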
def UpperCAmelCase ( self , __a , __a , __a) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = UperNetForSemanticSegmentation(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size))
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__ = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = UperNetModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a)
@unittest.skip(reason='''UperNet does not use inputs_embeds''')
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''')
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not have a base model''')
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not have a base model''')
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
def check_hidden_states_output(__a , __a , __a):
_UpperCamelCase = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_UpperCamelCase = model(**self._prepare_for_class(__a , __a))
_UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(__a) , expected_num_stages + 1)
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = True
check_hidden_states_output(__a , __a , __a)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase = True
check_hidden_states_output(__a , __a , __a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = _config_zero_init(__a)
_UpperCamelCase = _config_zero_init(configs_no_init.backbone_config)
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(config=__a)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason='''UperNet does not have tied weights''')
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
pass
@slow
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = UperNetForSemanticSegmentation.from_pretrained(__a)
self.assertIsNotNone(__a)
def lowerCamelCase__ ( ) -> int:
"""simple docstring"""
_UpperCamelCase = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''', repo_type='''dataset''', filename='''ADE_val_00000001.jpg''' )
_UpperCamelCase = Image.open(__snake_case ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class _UpperCAmelCase( unittest.TestCase ):
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''')
_UpperCamelCase = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''').to(__a)
_UpperCamelCase = prepare_img()
_UpperCamelCase = processor(images=__a , return_tensors='''pt''').to(__a)
with torch.no_grad():
_UpperCamelCase = model(**__a)
_UpperCamelCase = torch.Size((1, model.config.num_labels, 5_12, 5_12))
self.assertEqual(outputs.logits.shape , __a)
_UpperCamelCase = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __a , atol=1e-4))
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''')
_UpperCamelCase = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''').to(__a)
_UpperCamelCase = prepare_img()
_UpperCamelCase = processor(images=__a , return_tensors='''pt''').to(__a)
with torch.no_grad():
_UpperCamelCase = model(**__a)
_UpperCamelCase = torch.Size((1, model.config.num_labels, 5_12, 5_12))
self.assertEqual(outputs.logits.shape , __a)
_UpperCamelCase = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __a , atol=1e-4))
| 19 | 0 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class snake_case_ :
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCamelCase : int , ) ->Tuple:
snake_case_ = parent
snake_case_ = 1_3
snake_case_ = 7
snake_case_ = True
snake_case_ = True
snake_case_ = False
snake_case_ = True
snake_case_ = 9_9
snake_case_ = 3_2
snake_case_ = 2
snake_case_ = 4
snake_case_ = 3_7
snake_case_ = '''gelu'''
snake_case_ = 0.1
snake_case_ = 0.1
snake_case_ = 5_1_2
snake_case_ = 1_6
snake_case_ = 2
snake_case_ = 0.02
snake_case_ = 3
snake_case_ = 4
snake_case_ = None
def snake_case__( self : Optional[Any] ) ->Any:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__( self : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : Any ) ->List[str]:
snake_case_ = TFDistilBertModel(config=_UpperCamelCase )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
snake_case_ = model(_UpperCamelCase )
snake_case_ = [input_ids, input_mask]
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str ) ->str:
snake_case_ = TFDistilBertForMaskedLM(config=_UpperCamelCase )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple ) ->Optional[int]:
snake_case_ = TFDistilBertForQuestionAnswering(config=_UpperCamelCase )
snake_case_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__( self : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple ) ->str:
snake_case_ = self.num_labels
snake_case_ = TFDistilBertForSequenceClassification(_UpperCamelCase )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] ) ->Optional[Any]:
snake_case_ = self.num_choices
snake_case_ = TFDistilBertForMultipleChoice(_UpperCamelCase )
snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__( self : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) ->Optional[Any]:
snake_case_ = self.num_labels
snake_case_ = TFDistilBertForTokenClassification(_UpperCamelCase )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__( self : Union[str, Any] ) ->int:
snake_case_ = self.prepare_config_and_inputs()
((snake_case_), (snake_case_), (snake_case_), (snake_case_), (snake_case_), (snake_case_)) = config_and_inputs
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class snake_case_ ( __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
SCREAMING_SNAKE_CASE : List[Any] = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
def snake_case__( self : Optional[int] ) ->Dict:
snake_case_ = TFDistilBertModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , dim=3_7 )
def snake_case__( self : str ) ->Union[str, Any]:
self.config_tester.run_common_tests()
def snake_case__( self : Tuple ) ->Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_UpperCamelCase )
def snake_case__( self : str ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_UpperCamelCase )
def snake_case__( self : Optional[Any] ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->str:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_UpperCamelCase )
def snake_case__( self : Tuple ) ->Optional[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->List[str]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_UpperCamelCase )
@slow
def snake_case__( self : List[str] ) ->Union[str, Any]:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
snake_case_ = TFDistilBertModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_tf
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__( self : Dict ) ->Optional[int]:
snake_case_ = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
snake_case_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case_ = model(_UpperCamelCase )[0]
snake_case_ = [1, 6, 7_6_8]
self.assertEqual(output.shape , _UpperCamelCase )
snake_case_ = tf.constant(
[
[
[0.19261885, -0.13732955, 0.4119799],
[0.22150156, -0.07422661, 0.39037204],
[0.22756018, -0.0896414, 0.3701467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 )
| 39 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = (DDPMScheduler,)
def UpperCAmelCase ( self , **__a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__a)
return config
    def test_timesteps(self) -> None:
        for timesteps in [1, 5, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self) -> None:
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self) -> None:
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type(self) -> None:
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self) -> None:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding(self) -> None:
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )
    def test_prediction_type(self) -> None:
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self) -> None:
        for t in [0, 5_00, 9_99]:
            self.check_over_forward(time_step=t)
    def test_variance(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.0_0979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''')
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [1_00, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [1_00, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg='''`custom_timesteps` must be in descending order.'''):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [1_00, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''', ):
            scheduler.set_timesteps(timesteps=timesteps)
| 19 | 0 |
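# A minimal sketch of the denoising loop the tests above exercise, assuming
# only the public diffusers API; the zero tensor is a stand-in for a real
# epsilon-predicting model, which is not defined here.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample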
def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: repeatedly pop zero-indegree vertices; if not every
    vertex gets popped, the graph contains a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print('Cycle exists')
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 40 |
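# Quick check of the cycle branch above: in a hypothetical cyclic graph no
# vertex ever reaches indegree 0, the queue never fills, and cnt stays at 0.
topological_sort({0: [1], 1: [2], 2: [0]})  # prints "Cycle exists"
# For the acyclic graph above, the call prints [0, 1, 2, 3, 4, 5].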
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """
    Return a set of products, one for each way of writing
    ``number_to_partition`` as a sum of primes; by unique factorization the
    products are in bijection with the prime partitions.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret
def solution(number_unique_partitions: int = 50_00) -> int | None:
    """
    Return the smallest number with more than ``number_unique_partitions``
    prime partitions.
    """
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 19 | 0 |
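# Hand-checked example for partition() above: 10 has five prime partitions
# (2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7, 5+5) and, by unique factorization, their
# products 32, 36, 30, 21, 25 are all distinct, so the set has five members.
assert len(partition(10)) == 5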
'''simple docstring'''
import numpy as np
import qiskit
def bbaa(key_len=8, seed=None):
    """
    Simulates the BB84 quantum key distribution protocol and returns the
    sifted key as a bit string of length ``key_len``.
    """
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name='''BB84''')
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend('''aer_simulator''')
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = ''''''.join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ])
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, '''0''')
    return key
if __name__ == "__main__":
print(f'The generated key is : {bbaa(8, seed=0)}')
from doctest import testmod
testmod()
| 41 |
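# The sifting step from the protocol above, isolated as a pure-numpy sketch
# (no qiskit needed); the bit values are random stand-ins for Bob's
# measurement results. A bit survives only where both bases happen to agree.
import numpy as np

rng = np.random.default_rng(0)
alice_basis = rng.integers(2, size=16)
bob_basis = rng.integers(2, size=16)
bits = rng.integers(2, size=16)
sifted = ''.join(str(bit) for a, b, bit in zip(alice_basis, bob_basis, bits) if a == b)
print(sifted)  # on average about half of the 16 bits survive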
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def heun(ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float) -> np.array:
    """
    Heun's method (the explicit trapezoidal rule): an Euler predictor step
    followed by a trapezoidal corrector, giving second-order accuracy.
    """
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 19 | 0 |
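# Usage sketch for the integrator above (argument order follows the
# reconstruction: ode_func, ya, xa, step_size, x_end). For dy/dx = y with
# y(0) = 1, ten steps of size 0.1 should land near e at x = 1; Heun's method
# is second order, so even this coarse step gets close.
ys = heun(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
print(ys[-1])  # ~2.714 versus the exact value e ~ 2.71828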
'''simple docstring'''
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 42 |
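# A sketch of how a pin table like this is typically consumed: look packages
# up by name and emit requirement strings for packaging metadata (the helper
# below mirrors the deps_list() convention in transformers' setup.py, and is
# reconstructed here rather than copied from the source).
def deps_list(*pkgs: str) -> list:
    return [deps[pkg] for pkg in pkgs]

print(deps_list("torch", "tokenizers"))
# ['torch>=1.9,!=1.12.0', 'tokenizers>=0.11.1,!=0.11.3,<0.14']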
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            """Extracts some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer-Learned"""
            """ Distillation"""
        )
    )
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()
    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = """bert"""
    else:
        raise ValueError("""args.model_type should be \"bert\".""")
    state_dict = model.state_dict()
    compressed_sd = {}
for w in ["word_embeddings", "position_embeddings"]:
_a = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_a = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
            ]
        std_idx += 1
_a = state_dict["""cls.predictions.decoder.weight"""]
_a = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_a = state_dict[F"""cls.predictions.transform.dense.{w}"""]
_a = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 19 | 0 |
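# After the script runs, the dumped student state dict can be inspected
# before training on it (a sketch; the path is just the script's default
# from above, and the key layout depends on the chosen student architecture):
import torch

compressed_sd = torch.load("serialization_dir/tf_bert-base-uncased_0247911.pth")
print(len(compressed_sd), "tensors extracted from teacher layers 0, 2, 4, 7, 9, 11")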
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """
    Find all the valid positions a knight can move to from the current position.
    """
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    """
    Check if the board (matrix) has been completely filled with non-zero values.
    """
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """
    Helper function to solve the knight tour problem by backtracking.
    """
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """
    Find a solution to the open knight tour problem for a board of size n,
    raising ValueError when no tour exists for the given size.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
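# Usage sketch for the solver above: the entries of the returned board record
# the visit order 1..n*n. Open tours exist for n = 5, while n = 1 is the
# trivial board [[1]].
board = open_knight_tour(5)
for row in board:
    print(row)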
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_a = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
lowercase__ = PegasusConfig
lowercase__ = {}
lowercase__ = 'gelu'
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ) -> int:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = eos_token_id
_UpperCamelCase = pad_token_id
_UpperCamelCase = bos_token_id
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size)
_UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1)
_UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCamelCase = prepare_pegasus_inputs_dict(__a , __a , __a)
return config, inputs_dict
def UpperCAmelCase ( self , __a , __a , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(__a)
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''])
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __a , __a)
_UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''')
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''')
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , __a , decoder_attention_mask=__a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__a , )
_UpperCamelCase = model.decode(__a , __a)
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''')
def UpperCAmelCase ( self , __a , __a , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(__a)
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''])
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __a , __a)
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''')
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , __a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__a , decoder_position_ids=__a , )
_UpperCamelCase = model.decode(__a , __a , decoder_attention_mask=__a)
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''')
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=None, __snake_case=None, ) -> Union[str, Any]:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase = np.not_equal(__snake_case, config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_UpperCamelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape, dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ).astype(np.inta ),
], axis=-1, )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__a , __a , __a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__a , __a , __a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_UpperCamelCase = self._prepare_for_class(__a , __a)
_UpperCamelCase = model_class(__a)
@jax.jit
def encode_jitted(__a , __a=None , **__a):
return model.encode(input_ids=__a , attention_mask=__a)
with self.subTest('''JIT Enabled'''):
_UpperCamelCase = encode_jitted(**__a).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
_UpperCamelCase = encode_jitted(**__a).to_tuple()
self.assertEqual(len(__a) , len(__a))
for jitted_output, output in zip(__a , __a):
self.assertEqual(jitted_output.shape , output.shape)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''])
_UpperCamelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(__a , __a , __a):
return model.decode(
decoder_input_ids=__a , decoder_attention_mask=__a , encoder_outputs=__a , )
with self.subTest('''JIT Enabled'''):
_UpperCamelCase = decode_jitted(**__a).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
_UpperCamelCase = decode_jitted(**__a).to_tuple()
self.assertEqual(len(__a) , len(__a))
for jitted_output, output in zip(__a , __a):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=__a)
_UpperCamelCase = np.ones((1, 1))
_UpperCamelCase = model(__a)
self.assertIsNotNone(__a)
@slow
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''')
_UpperCamelCase = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''')
_UpperCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
_UpperCamelCase = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
_UpperCamelCase = tokenizer(__a , return_tensors='''np''' , truncation=__a , max_length=5_12 , padding=__a)
_UpperCamelCase = model.generate(**__a , num_beams=2).sequences
_UpperCamelCase = tokenizer.batch_decode(__a , skip_special_tokens=__a)
assert tgt_text == decoded
| 19 | 0 |
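# The JIT-on/JIT-off comparison in the tests above hinges on
# jax.disable_jit(), which is also useful on its own when debugging traced
# code (a minimal sketch):
import jax
import jax.numpy as jnp

@jax.jit
def double(x):
    return x * 2

with jax.disable_jit():
    print(double(jnp.ones(3)))  # runs eagerly, so prints and breakpoints work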
'''simple docstring'''
def factorial(num: int) -> int:
    """Compute num! iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact
def split_and_add(number: int) -> int:
    """Split the number into digits and add them together."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
| 44 |
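# Hand-checked example for the pipeline above: 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so:
assert solution(10) == 27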
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , __a=0 , ) -> Any:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
_UpperCamelCase = projection_dim
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
_UpperCamelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict())
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = TFDPRContextEncoder(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = TFDPRQuestionEncoder(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = TFDPRReader(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__a)
@slow
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRContextEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRContextEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRQuestionEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRReader.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
@slow
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''')
_UpperCamelCase = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]]) # [CLS] hello, is my dog cute? [SEP]
_UpperCamelCase = model(__a)[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_UpperCamelCase = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
])
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4))
| 19 | 0 |
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3, ) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is essentially what Richard Brent's optimized variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
        print(f'''{args.num} = {divisor} * {quotient}''')
| 45 |
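# Classic worked example (hand-checked against the algorithm above): with the
# default seed and step, Floyd's cycle detection on f(x) = x**2 + 1 mod 8051
# recovers one of the factors of 8051 = 83 * 97.
assert pollard_rho(8051) in (83, 97)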
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp: int) -> bool:
    """Checks whether the codepoint ``cp`` is that of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x2_0000 and cp <= 0x2_A6DF)  #
        or (cp >= 0x2_A700 and cp <= 0x2_B73F)  #
        or (cp >= 0x2_B740 and cp <= 0x2_B81F)  #
        or (cp >= 0x2_B820 and cp <= 0x2_CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2_F800 and cp <= 0x2_FA1F)  #
    ):  #
        return True
    return False
def is_chinese(word: str) -> int:
    """Return 1 if every character in ``word`` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]) -> List[str]:
    """Collect the multi-character, fully-Chinese words from ``tokens``."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    """Prefix BERT word pieces that continue an LTP word with ``##``."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer) -> List[List[int]]:
    """For each line, record the positions of Chinese sub-word pieces."""
    ltp_res = []
    for i in range(0, len(lines), 1_00):
        res = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws''']).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 1_00):
        res = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=True, truncation=True, max_length=5_12)
        bert_res.extend(res['''input_ids'''])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords starting with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args) -> None:
    """Read the corpus, compute the whole-word reference ids and save them."""
    with open(args.file_name, '''r''', encoding='''utf-8''') as f:
        data = f.readlines()
    lines = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(lines, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, '''w''', encoding='''utf-8''') as f:
        data = [json.dumps(ref) + '''\n''' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
_a = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
| 19 | 0 |
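# Toy walk-through of add_sub_symbol() above (hypothetical tokens): when the
# BERT pieces "中" + "国" line up with the LTP word "中国", the continuation
# piece gets the "##" prefix so whole-word masking can treat it as one word.
print(add_sub_symbol(["中", "国", "人"], {"中国"}))  # ['中', '##国', '人']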
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = 'gpt_neox_japanese'
    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=2_560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10_000,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31_996,
        eos_token_id=31_999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 46 |
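# Usage sketch for the config above (keyword names follow the class as
# reconstructed; values shown are its defaults, assuming a transformers
# version that ships this model):
from transformers import GPTNeoXJapaneseConfig

config = GPTNeoXJapaneseConfig(hidden_size=2_560, num_hidden_layers=32)
print(config.intermediate_multiple_size)  # 4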
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """
    Greedy APX algorithm for minimum vertex cover: repeatedly pick the vertex
    of highest remaining degree and delete its incident edges.
    """
    queue = []
    # for each node and its adjacency list, add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes left, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 19 | 0 |
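# Small hand-traced example for the heuristic above: on the path graph 0-1-2
# the middle vertex has the highest degree and covers both edges by itself,
# so the greedy pick happens to be optimal here.
print(greedy_min_vertex_cover({0: [1], 1: [0, 2], 2: [1]}))  # {1}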
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
__a : List[Any] = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1_0_2_4,
'hidden_size': 7_6_8,
'max_length': 5_1_2,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1_0_2_4,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
__a : Optional[int] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__a : List[str] = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=lowerCamelCase_ , output_all_encodings=lowerCamelCase_ , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , lowerCamelCase_ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__a : int = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
__a : Optional[Any] = os.path.join(get_home_dir() , 'models' )
__a : Optional[Any] = _load_vocab(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , cls=lowerCamelCase_ )
__a : Any = nlp.model.BERTModel(
lowerCamelCase_ , len(lowerCamelCase_ ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=lowerCamelCase_ , use_token_type_embed=lowerCamelCase_ , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=lowerCamelCase_ , use_decoder=lowerCamelCase_ , )
original_bort.load_parameters(lowerCamelCase_ , cast_dtype=lowerCamelCase_ , ignore_extra=lowerCamelCase_ )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
__a : Optional[Any] = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(lowerCamelCase_ ),
}
__a : str = BertConfig.from_dict(lowerCamelCase_ )
__a : Optional[int] = BertForMaskedLM(lowerCamelCase_ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
        return gluon_param
__a : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
__a : str = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
__a : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
__a : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__a : Union[str, Any] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers):
    layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

    # self attention
    self_attn: BertSelfAttention = layer.attention.self

    self_attn.key.bias.data = check_and_map_params(
        self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
    )
    self_attn.key.weight.data = check_and_map_params(
        self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
    )
    self_attn.query.bias.data = check_and_map_params(
        self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
    )
    self_attn.query.weight.data = check_and_map_params(
        self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
    )
    self_attn.value.bias.data = check_and_map_params(
        self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
    )
    self_attn.value.weight.data = check_and_map_params(
        self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
    )

    # self attention output
    self_output: BertSelfOutput = layer.attention.output

    self_output.dense.bias = check_and_map_params(
        self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
    )
    self_output.dense.weight = check_and_map_params(
        self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
    )
    self_output.LayerNorm.bias = check_and_map_params(
        self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
    )
    self_output.LayerNorm.weight = check_and_map_params(
        self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
    )

    # intermediate
    intermediate: BertIntermediate = layer.intermediate

    intermediate.dense.bias = check_and_map_params(
        intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
    )
    intermediate.dense.weight = check_and_map_params(
        intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
    )

    # output
    bert_output: BertOutput = layer.output

    bert_output.dense.bias = check_and_map_params(
        bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
    )
    bert_output.dense.weight = check_and_map_params(
        bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
    )
    bert_output.LayerNorm.bias = check_and_map_params(
        bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
    )
    bert_output.LayerNorm.weight = check_and_map_params(
        bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
    )
# Save space and energy 🎄
hf_bort_model.half()

# Compare output of both models
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

# `sample_text` is the test sentence defined earlier in this script
input_ids = tokenizer.encode_plus(sample_text)["input_ids"]

# Get gluon output
gluon_input_ids = mx.nd.array([input_ids])
output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(pytorch_dump_folder_path)
hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
hf_bort_model.eval()

input_ids = tokenizer.encode_plus(sample_text, return_tensors="pt")
output_hf = hf_bort_model(**input_ids)[0]

gluon_layer = output_gluon[0].asnumpy()
hf_layer = output_hf[0].detach().numpy()

max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

if success:
    print("✔️ Both models output the same tensors")
else:
    print("❌ Both models do **NOT** output the same tensors")
    print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
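    # Example invocation (the script name and file paths are illustrative):
    #   python convert_bort_checkpoint_to_pytorch.py \
    #       --bort_checkpoint_path ./bort.params --pytorch_dump_folder_path ./bort-pytorch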
| 47 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
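# Note: as the assertions above show, patch_submodule swaps every alias of the patched
# attribute inside the target module (including renamed imports), wrapping intermediate
# submodules in _PatchedModuleObj, and restores the originals once the context exits.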
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
pass
| 19 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
UpperCAmelCase__ : List[str] = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
        localized_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
        link_unchanged_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 48 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    """Renames the original checkpoint keys to the HuggingFace naming scheme."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
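# Example: with patch_emb_offset == 0, the key "network.0.0.mlp.fc1.weight"
# is renamed to "poolformer.encoder.block.0.0.output.conv1.weight".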
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_a = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
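    # Example invocation (the script name and checkpoint path are illustrative):
    #   python convert_poolformer_checkpoint.py --model_name poolformer_s12 \
    #       --checkpoint_path ./poolformer_s12.pth --pytorch_dump_folder_path ./poolformer-s12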
| 19 | 0 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the root of the DFS tree is an articulation point only if it has more than one out edge
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
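# For the sample graph above, the articulation points printed are 2, 3 and 5.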
| 49 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47_8210_4492_1875) < 1e-2
            assert abs(result_mean.item() - 0.2178_7059_6456_5277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3521_1181_6406) < 1e-2
            assert abs(result_mean.item() - 0.2_2342_9068_9229_9652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77_1492_0043_9453) < 1e-2
            assert abs(result_mean.item() - 0.1_6226_2890_1481_6284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1_6633_6059_5703) < 1e-2
            assert abs(result_mean.item() - 0.1_6688_3260_0116_7297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8_4875_4882_8125) < 1e-2
            assert abs(result_mean.item() - 0.1560_5306_6253_6621) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46_9573_9746_0938) < 1e-2
            assert abs(result_mean.item() - 0.2_1805_9346_0798_2635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3536_3769_5312) < 1e-2
            assert abs(result_mean.item() - 0.2_2342_9083_8241_5771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66_9741_3574_2188) < 1e-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63_6535_6445_3125) < 1e-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3_1352_2338_8672) < 1e-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
| 19 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
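# Minimal usage sketch (the file paths are illustrative):
#   tokenizer = BartphoTokenizer("sentencepiece.bpe.model", "dict.txt")
#   input_ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]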
| 50 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a = logging.get_logger(__name__)
class ViTHybridImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        do_rescale=True,
        rescale_factor=1 / 255,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
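# Minimal usage sketch (the `image` variable is illustrative); BaseImageProcessor.__call__
# delegates to preprocess():
#   processor = ViTHybridImageProcessor()
#   inputs = processor(images=image, return_tensors="pt")  # -> {"pixel_values": ...}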
| 19 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
def __snake_case ( self : Dict ):
UpperCAmelCase = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(a__ )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_video()
UpperCAmelCase = image_processor(a__ , return_tensors='''pt''' ).to(a__ )
# add boolean mask, indicating which patches to mask
UpperCAmelCase = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
UpperCAmelCase = torch.load(a__ )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**a__ )
# verify the logits
UpperCAmelCase = torch.Size([1, 1408, 1536] )
UpperCAmelCase = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a__ )
self.assertEqual(outputs.logits.shape , a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a__ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
UpperCAmelCase = torch.tensor([0.5_142] , device=a__ )
self.assertTrue(torch.allclose(outputs.loss , a__ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
UpperCAmelCase = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=a__ ).to(
a__ )
with torch.no_grad():
UpperCAmelCase = model(**a__ )
UpperCAmelCase = torch.tensor([0.6_469] , device=a__ )
self.assertTrue(torch.allclose(outputs.loss , a__ , atol=1e-4 ) )
| 51 |
"""simple docstring"""
# Imports
import numpy as np
class _UpperCAmelCase:
def __init__( self , __a=None , __a=None , __a=None , __a=None , __a=None) -> Dict:
'''simple docstring'''
self.set_matrices(red=__a , green=__a , blue=__a , red_edge=__a , nir=__a)
def UpperCAmelCase ( self , __a=None , __a=None , __a=None , __a=None , __a=None) -> Dict:
'''simple docstring'''
if red is not None:
_UpperCamelCase = red
if green is not None:
_UpperCamelCase = green
if blue is not None:
_UpperCamelCase = blue
if red_edge is not None:
_UpperCamelCase = red_edge
if nir is not None:
_UpperCamelCase = nir
return True
def UpperCAmelCase ( self , __a="" , __a=None , __a=None , __a=None , __a=None , __a=None) -> List[str]:
'''simple docstring'''
self.set_matrices(red=__a , green=__a , blue=__a , red_edge=__a , nir=__a)
_UpperCamelCase = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''')
return False
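# usage sketch (method and variable names assumed, not taken from this snippet): an instance
# built from numpy band arrays can dispatch by index name, e.g.
# calculation("NDVI", red=red_band, nir=nir_band), which returns (nir - red) / (nir + red)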
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase ( self , __a=0.08 , __a=1.22 , __a=0.03) -> Optional[Any]:
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir / self.green) - 1
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return (self.red - self.blue) / self.red
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.nir - self.green
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def UpperCAmelCase ( self , __a=0.16) -> Optional[Any]:
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase ( self , __a=0.5) -> Dict:
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def UpperCAmelCase ( self , __a=None , __a=None) -> Any:
'''simple docstring'''
return (self.nir - b) / (a * self.red)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
_UpperCamelCase = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 19 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A = logging.getLogger(__name__)
def __A ( a_ :Dict , a_ :Any) -> str:
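# element-wise equality gives a boolean array whose mean is the fraction of correct
# predictions, e.g. preds = [1, 0, 1] vs labels = [1, 1, 1] yields 2 / 3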
return (preds == labels).mean()
@dataclass
class __lowercase :
'''simple docstring'''
__lowerCAmelCase = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class __lowercase :
'''simple docstring'''
__lowerCAmelCase = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
__lowerCAmelCase = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
__lowerCAmelCase = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def __A ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__a : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
__a , __a , __a : Any = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , a_)
# Set seed
set_seed(training_args.seed)
try:
__a : Optional[Any] = processors[data_args.task_name]()
__a : Dict = processor.get_labels()
__a : str = len(a_)
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__a : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=a_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
__a : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__a : Optional[int] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path) , config=a_ , cache_dir=model_args.cache_dir , )
# Get datasets
__a : Optional[int] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=a_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__a : Any = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=a_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(a_ :EvalPrediction) -> Dict:
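# predictions have shape (num_examples, num_choices); argmax over axis 1 picks the
# highest-scoring answer choice for each example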
__a : int = np.argmax(p.predictions , axis=1)
return {"acc": simple_accuracy(a_ , p.label_ids)}
# Data collator
__a : List[Any] = DataCollatorWithPadding(a_ , pad_to_multiple_of=8) if training_args.fpaa else None
# Initialize our Trainer
__a : List[Any] = Trainer(
model=a_ , args=a_ , train_dataset=a_ , eval_dataset=a_ , compute_metrics=a_ , data_collator=a_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
__a : Optional[Any] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''')
__a : Any = trainer.evaluate()
__a : Optional[int] = os.path.join(training_args.output_dir , '''eval_results.txt''')
if trainer.is_world_master():
with open(a_ , '''w''') as writer:
logger.info('''***** Eval results *****''')
for key, value in result.items():
logger.info(''' %s = %s''' , a_ , a_)
writer.write('''%s = %s\n''' % (key, value))
results.update(a_)
return results
def __A ( a_ :int) -> Optional[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 52 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=64 , __a=2 , __a=3 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a=[1, 16, 4, 4] , __a=None , ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_UpperCamelCase = (self.image_size // 32) ** 2
_UpperCamelCase = num_patches + 1
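# e.g. with the default image_size=64 used by this tester: (64 // 32) ** 2 = 4 patches,
# so the expected sequence length is 4 + 1 = 5 once the [CLS] token is prepended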
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__a , )
def UpperCAmelCase ( self , __a , __a , __a) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ViTHybridModel(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.type_sequence_label_size
_UpperCamelCase = ViTHybridForImageClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowercase__ = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ViTHybridModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''')
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear))
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict, so the arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = _config_zero_init(__a)
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(config=__a)
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
_UpperCamelCase = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = ViTHybridModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
__a)
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=__a , return_tensors='''pt''').to(__a)
# forward pass
with torch.no_grad():
_UpperCamelCase = model(**__a)
# verify the logits
_UpperCamelCase = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __a)
_UpperCamelCase = torch.tensor([-1.9090, -0.4993, -0.2389]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4))
@slow
@require_accelerate
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''')
_UpperCamelCase = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''')
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=__a , return_tensors='''pt''')
_UpperCamelCase = model(**__a)
_UpperCamelCase = outputs.logits
# model predicts one of the 1000 ImageNet classes
_UpperCamelCase = logits.argmax(-1).item()
self.assertEqual(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''')
| 19 | 0 |
def a_ ( lowerCAmelCase_ : float, lowerCAmelCase_ : float ):
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(100, 0.25) = }""")
print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
| 53 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['vqvae']
def __init__( self , __a , __a , __a , __a , ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__a , scheduler=__a , mel=__a , vqvae=__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return 50 if isinstance(self.scheduler , __a) else 10_00
@torch.no_grad()
def __call__( self , __a = 1 , __a = None , __a = None , __a = 0 , __a = 0 , __a = None , __a = None , __a = 0 , __a = 0 , __a = None , __a = 0 , __a = None , __a = None , __a=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
_UpperCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(__a)
_UpperCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
_UpperCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_UpperCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__a , device=self.device , )
_UpperCamelCase = noise
_UpperCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__a , __a)
_UpperCamelCase = self.mel.audio_slice_to_image(__a)
_UpperCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''').reshape(
(input_image.height, input_image.width))
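# rescale uint8 pixels from [0, 255] to [-1, 1], the input range the diffusion model expects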
_UpperCamelCase = (input_image / 2_55) * 2 - 1
_UpperCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
_UpperCamelCase = self.vqvae.encode(torch.unsqueeze(__a , 0)).latent_dist.sample(
generator=__a)[0]
_UpperCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_UpperCamelCase = self.scheduler.add_noise(__a , __a , self.scheduler.timesteps[start_step - 1])
_UpperCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_UpperCamelCase = int(mask_start_secs * pixels_per_second)
_UpperCamelCase = int(mask_end_secs * pixels_per_second)
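# mask_start / mask_end are now column offsets into the spectrogram image, so a masked time
# range given in seconds maps directly onto the pixel columns that are overwritten below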
_UpperCamelCase = self.scheduler.add_noise(__a , __a , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , __a):
_UpperCamelCase = self.unet(__a , __a , __a)['''sample''']
else:
_UpperCamelCase = self.unet(__a , __a)['''sample''']
if isinstance(self.scheduler , __a):
_UpperCamelCase = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , eta=__a , generator=__a , )['''prev_sample''']
else:
_UpperCamelCase = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , generator=__a , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_UpperCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_UpperCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was the scaling factor used in training to ensure unit variance
_UpperCamelCase = 1 / self.vqvae.config.scaling_factor * images
_UpperCamelCase = self.vqvae.decode(__a)['''sample''']
_UpperCamelCase = (images / 2 + 0.5).clamp(0 , 1)
_UpperCamelCase = images.cpu().permute(0 , 2 , 3 , 1).numpy()
_UpperCamelCase = (images * 2_55).round().astype('''uint8''')
_UpperCamelCase = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__a , mode='''RGB''').convert('''L''') for _ in images))
_UpperCamelCase = [self.mel.image_to_audio(__a) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__a)[:, np.newaxis, :]) , **ImagePipelineOutput(__a))
@torch.no_grad()
def UpperCAmelCase ( self , __a , __a = 50) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , __a)
self.scheduler.set_timesteps(__a)
_UpperCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
_UpperCamelCase = (sample / 2_55) * 2 - 1
_UpperCamelCase = torch.Tensor(__a).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
_UpperCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_UpperCamelCase = self.scheduler.alphas_cumprod[t]
_UpperCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_UpperCamelCase = 1 - alpha_prod_t
_UpperCamelCase = self.unet(__a , __a)['''sample''']
_UpperCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_UpperCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_UpperCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCAmelCase ( __a , __a , __a) -> torch.Tensor:
'''simple docstring'''
_UpperCamelCase = acos(torch.dot(torch.flatten(__a) , torch.flatten(__a)) / torch.norm(__a) / torch.norm(__a))
return sin((1 - alpha) * theta) * xa / sin(__a) + sin(alpha * theta) * xa / sin(__a)
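# the static method above is spherical linear interpolation (slerp): theta is the angle
# between the two flattened tensors, recovered from their normalized dot product, and the
# intended result is sin((1 - alpha) * theta) / sin(theta) * x0 + sin(alpha * theta) / sin(theta) * x1,
# which walks the great circle between the two noise tensors as alpha goes from 0 to 1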
| 19 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Union[str, Any] =logging.get_logger(__name__)
__lowercase : List[Any] =torch.device("""cpu""")
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ ="http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ =Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
def a__ ( lowercase__ ):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
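# pop the tensor stored under the old key and re-bind it under the new key, in place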
UpperCAmelCase_ =dct.pop(lowercase__ )
UpperCAmelCase_ =val
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =[]
for k in state_dict.keys():
UpperCAmelCase_ =k
if ".pwconv" in k:
UpperCAmelCase_ =k_new.replace(".pwconv" , ".point_wise_conv" )
if ".dwconv" in k:
UpperCAmelCase_ =k_new.replace(".dwconv" , ".depth_wise_conv" )
if ".Proj." in k:
UpperCAmelCase_ =k_new.replace(".Proj." , ".proj." )
if "patch_embed" in k_new:
UpperCAmelCase_ =k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
UpperCAmelCase_ =k_new.split("." )
if ls[2].isdigit():
UpperCAmelCase_ ="swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
UpperCAmelCase_ =k_new.replace("network" , "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =SwiftFormerConfig()
# label mapping: the converted checkpoints classify the 1000 classes of ImageNet-1k
UpperCAmelCase_ =1_0_0_0
UpperCAmelCase_ ="huggingface/label-files"
UpperCAmelCase_ ="imagenet-1k-id2label.json"
UpperCAmelCase_ =json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ ={int(lowercase__ ): v for k, v in idalabel.items()}
UpperCAmelCase_ =idalabel
UpperCAmelCase_ ={v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
UpperCAmelCase_ =[3, 3, 6, 4]
UpperCAmelCase_ =[4_8, 5_6, 1_1_2, 2_2_0]
elif swiftformer_name == "swiftformer_s":
UpperCAmelCase_ =[3, 3, 9, 6]
UpperCAmelCase_ =[4_8, 6_4, 1_6_8, 2_2_4]
elif swiftformer_name == "swiftformer_l1":
UpperCAmelCase_ =[4, 3, 1_0, 5]
UpperCAmelCase_ =[4_8, 9_6, 1_9_2, 3_8_4]
elif swiftformer_name == "swiftformer_l3":
UpperCAmelCase_ =[4, 4, 1_2, 6]
UpperCAmelCase_ =[6_4, 1_2_8, 3_2_0, 5_1_2]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
UpperCAmelCase_ =torch.hub.load_state_dict_from_url(lowercase__ , map_location="cpu" , check_hash=lowercase__ )
else:
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )
UpperCAmelCase_ =checkpoint
UpperCAmelCase_ =create_rename_keys(lowercase__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
# load HuggingFace model
UpperCAmelCase_ =SwiftFormerForImageClassification(lowercase__ ).eval()
hf_model.load_state_dict(lowercase__ )
# prepare test inputs
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =ViTImageProcessor.from_pretrained("preprocessor_config" )
UpperCAmelCase_ =processor(images=lowercase__ , return_tensors="pt" )
# compare outputs from both models
UpperCAmelCase_ =get_expected_output(lowercase__ )
UpperCAmelCase_ =hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
assert torch.allclose(hf_logits[0, 0:5] , lowercase__ , atol=1E-3 )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(F'Saving model {swiftformer_name} to {pytorch_dump_folder_path}' )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
__lowercase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
__lowercase : List[Any] =parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 54 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
_a = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'detr'
lowercase__ = ['past_key_values']
lowercase__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __a=True , __a=None , __a=3 , __a=1_00 , __a=6 , __a=20_48 , __a=8 , __a=6 , __a=20_48 , __a=8 , __a=0.0 , __a=0.0 , __a=True , __a="relu" , __a=2_56 , __a=0.1 , __a=0.0 , __a=0.0 , __a=0.02 , __a=1.0 , __a=False , __a="sine" , __a="resnet50" , __a=True , __a=False , __a=1 , __a=5 , __a=2 , __a=1 , __a=1 , __a=5 , __a=2 , __a=0.1 , **__a , ) -> int:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(__a , __a):
_UpperCamelCase = backbone_config.get('''model_type''')
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(__a)
# set timm attributes to None
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None, None, None
_UpperCamelCase = use_timm_backbone
_UpperCamelCase = backbone_config
_UpperCamelCase = num_channels
_UpperCamelCase = num_queries
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = encoder_layers
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
_UpperCamelCase = backbone
_UpperCamelCase = use_pretrained_backbone
_UpperCamelCase = dilation
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=__a , **__a)
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.d_model
@classmethod
def UpperCAmelCase ( cls , __a , **__a) -> int:
'''simple docstring'''
return cls(backbone_config=__a , **__a)
def UpperCAmelCase ( self) -> Dict[str, any]:
'''simple docstring'''
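# serialize a deep copy so mutating the returned dict cannot affect the live config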
_UpperCamelCase = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = version.parse('1.11' )
@property
def UpperCAmelCase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
def UpperCAmelCase ( self) -> float:
'''simple docstring'''
return 1e-5
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return 12
| 19 | 0 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__)
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = CLIPConfig
snake_case_ = ["CLIPEncoderLayer"]
def __init__( self : Tuple ,A : CLIPConfig ):
super().__init__(A )
__A = CLIPVisionModelWithProjection(config.vision_config )
__A = nn.Linear(config.vision_config.projection_dim ,1 )
__A = nn.Linear(config.vision_config.projection_dim ,1 )
@torch.no_grad()
def UpperCamelCase_ ( self : Tuple ,A : List[str] ,A : Any ,A : Optional[Any]=0.5 ,A : List[Any]=0.5 ):
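# the vision tower yields one embedding per image; each linear head maps it to a scalar
# score, and a score above the corresponding threshold flags that image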
__A = self.vision_model(A )[0]
__A = self.p_head(A )
__A = nsfw_detected.flatten()
__A = nsfw_detected > p_threshold
__A = nsfw_detected.tolist()
if any(A ):
logger.warning(
"Potential NSFW content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed." )
for idx, nsfw_detected_ in enumerate(A ):
if nsfw_detected_:
__A = np.zeros(images[idx].shape )
__A = self.w_head(A )
__A = watermark_detected.flatten()
__A = watermark_detected > w_threshold
__A = watermark_detected.tolist()
if any(A ):
logger.warning(
"Potential watermarked content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed." )
for idx, watermark_detected_ in enumerate(A ):
if watermark_detected_:
__A = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
| 55 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'wavlm'
def __init__( self , __a=32 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.1 , __a=0.02 , __a=1e-5 , __a="group" , __a="gelu" , __a=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __a=(5, 2, 2, 2, 2, 2, 2) , __a=(10, 3, 3, 3, 3, 2, 2) , __a=False , __a=1_28 , __a=16 , __a=3_20 , __a=8_00 , __a=False , __a=True , __a=0.05 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=3_20 , __a=2 , __a=0.1 , __a=1_00 , __a=2_56 , __a=2_56 , __a=0.1 , __a="mean" , __a=False , __a=False , __a=2_56 , __a=(5_12, 5_12, 5_12, 5_12, 15_00) , __a=(5, 3, 3, 1, 1) , __a=(1, 2, 3, 1, 1) , __a=5_12 , __a=80 , __a=0 , __a=1 , __a=2 , __a=False , __a=3 , __a=2 , __a=3 , __a=None , **__a , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a)
_UpperCamelCase = hidden_size
_UpperCamelCase = feat_extract_norm
_UpperCamelCase = feat_extract_activation
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = conv_bias
_UpperCamelCase = num_buckets
_UpperCamelCase = max_bucket_distance
_UpperCamelCase = num_conv_pos_embeddings
_UpperCamelCase = num_conv_pos_embedding_groups
_UpperCamelCase = len(self.conv_dim)
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = feat_proj_dropout
_UpperCamelCase = final_dropout
_UpperCamelCase = layerdrop
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
_UpperCamelCase = num_ctc_classes
_UpperCamelCase = vocab_size
_UpperCamelCase = do_stable_layer_norm
_UpperCamelCase = use_weighted_layer_sum
_UpperCamelCase = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase = apply_spec_augment
_UpperCamelCase = mask_time_prob
_UpperCamelCase = mask_time_length
_UpperCamelCase = mask_time_min_masks
_UpperCamelCase = mask_feature_prob
_UpperCamelCase = mask_feature_length
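# during training, each time step is selected with probability mask_time_prob as the start
# of a masked span of mask_time_length frames (and analogously along the feature axis)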
# parameters for pretraining with codevector quantized representations
_UpperCamelCase = num_codevectors_per_group
_UpperCamelCase = num_codevector_groups
_UpperCamelCase = contrastive_logits_temperature
_UpperCamelCase = num_negatives
_UpperCamelCase = codevector_dim
_UpperCamelCase = proj_codevector_dim
_UpperCamelCase = diversity_loss_weight
# ctc loss
_UpperCamelCase = ctc_loss_reduction
_UpperCamelCase = ctc_zero_infinity
# adapter
_UpperCamelCase = add_adapter
_UpperCamelCase = adapter_kernel_size
_UpperCamelCase = adapter_stride
_UpperCamelCase = num_adapter_layers
_UpperCamelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = xvector_output_dim
@property
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
| 19 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
_a : Optional[int] = False
_a : Dict = False
def _a (lowercase__ : Namespace ) -> int:
"""simple docstring"""
return TrainCommand(lowercase__ )
class _lowercase ( __lowercase ):
@staticmethod
def a ( SCREAMING_SNAKE_CASE_ : ArgumentParser ) -> Any:
__snake_case = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=SCREAMING_SNAKE_CASE_ , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=SCREAMING_SNAKE_CASE_ , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=SCREAMING_SNAKE_CASE_ , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=SCREAMING_SNAKE_CASE_ , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=SCREAMING_SNAKE_CASE_ , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=SCREAMING_SNAKE_CASE_ , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=SCREAMING_SNAKE_CASE_ , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE_ , default=3e-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=SCREAMING_SNAKE_CASE_ , default=1e-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Namespace ) -> Optional[int]:
__snake_case = logging.get_logger('transformers-cli/training' )
__snake_case = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=SCREAMING_SNAKE_CASE_ )
__snake_case = args.output
__snake_case = args.column_label
__snake_case = args.column_text
__snake_case = args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
__snake_case = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
__snake_case = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case = None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
__snake_case = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case = args.validation_split
__snake_case = args.train_batch_size
__snake_case = args.valid_batch_size
__snake_case = args.learning_rate
__snake_case = args.adam_epsilon
def a ( self : Optional[int] ) -> int:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def a ( self : Tuple ) -> List[str]:
raise NotImplementedError
def a ( self : Dict ) -> Optional[int]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 56 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_a = """bart"""
_a = True
@st.cache(allow_output_mutation=__snake_case )
def lowerCamelCase__ ( ) -> Dict:
"""simple docstring"""
if LOAD_DENSE_INDEX:
_UpperCamelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCamelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCamelCase = qar_model.eval()
else:
_UpperCamelCase , _UpperCamelCase = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCamelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCamelCase = sas_model.eval()
else:
_UpperCamelCase , _UpperCamelCase = make_qa_sas_model(
model_name='''t5-small''', from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''', device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__snake_case )
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
_UpperCamelCase = faiss.StandardGpuResources()
_UpperCamelCase = datasets.load_dataset(path='''wiki_snippets''', name='''wiki40b_en_100_0''' )['''train''']
_UpperCamelCase = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''', dtype='''float32''', mode='''r''', shape=(wikiaab_passages.num_rows, 1_28), )
_UpperCamelCase = faiss.IndexFlatIP(1_28 )
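# IndexFlatIP performs exact maximum-inner-product search over the 128-d passage embeddings,
# matching how the dense retriever scores question-passage pairs (dot product, not L2 distance)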
_UpperCamelCase = faiss.index_cpu_to_gpu(__snake_case, 1, __snake_case )
wikiaab_gpu_index_flat.add(__snake_case ) # TODO fix for larger GPU
else:
_UpperCamelCase , _UpperCamelCase = (None, None)
_UpperCamelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__snake_case )
def lowerCamelCase__ ( ) -> int:
"""simple docstring"""
_UpperCamelCase = datasets.load_dataset('''eli5''', name='''LFQA_reddit''' )
_UpperCamelCase = elia['''train_eli5''']
_UpperCamelCase = np.memmap(
'''eli5_questions_reps.dat''', dtype='''float32''', mode='''r''', shape=(elia_train.num_rows, 1_28) )
_UpperCamelCase = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(__snake_case )
return (elia_train, eli5_train_q_index)
_a , _a , _a = load_indexes()
_a , _a , _a , _a = load_models()
_a , _a = load_train_data()
def lowerCamelCase__ ( __snake_case, __snake_case=10 ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = embed_questions_for_retrieval([question], __snake_case, __snake_case )
_UpperCamelCase , _UpperCamelCase = eli5_train_q_index.search(__snake_case, __snake_case )
_UpperCamelCase = [elia_train[int(__snake_case )] for i in I[0]]
return nn_examples
def lowerCamelCase__ ( __snake_case, __snake_case="wiki40b", __snake_case="dense", __snake_case=10 ) -> List[str]:
"""simple docstring"""
if source == "none":
_UpperCamelCase , _UpperCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase , _UpperCamelCase = query_qa_dense_index(
__snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
else:
_UpperCamelCase , _UpperCamelCase = query_es_index(
__snake_case, __snake_case, index_name='''english_wiki40b_snippets_100w''', n_results=__snake_case, )
_UpperCamelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase = '''question: {} context: {}'''.format(__snake_case, __snake_case )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __snake_case : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __snake_case : None),
} )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=64, __snake_case=2_56, __snake_case=False, __snake_case=2, __snake_case=0.95, __snake_case=0.8 ) -> Dict:
"""simple docstring"""
with torch.no_grad():
_UpperCamelCase = qa_sas_generate(
__snake_case, __snake_case, __snake_case, num_answers=1, num_beams=__snake_case, min_len=__snake_case, max_len=__snake_case, do_sample=__snake_case, temp=__snake_case, top_p=__snake_case, top_k=__snake_case, max_input_length=10_24, device='''cuda:0''', )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_a = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_a = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_a = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_a = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_a = st.sidebar.checkbox("""Demo options""")
if demo_options:
_a = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_a = action_list.index(action_st)
_a = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_a = show_type == """Show full text of passages"""
else:
_a = 3
_a = True
_a = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_a = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_a = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_a = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_a = """wiki40b"""
_a = """dense"""
_a = """beam"""
_a = 2
_a = 64
_a = 256
_a = None
_a = None
_a = st.sidebar.checkbox("""Generation options""")
if generate_options:
_a = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can have the model decode with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_a = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_a = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_a = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_a = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_a = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_a = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_a = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("""Enter your question here:""", """""")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_a , _a = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_a , _a = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_a = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_a = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_a = res[1].strip()
if sec_titles == "":
_a = """[{}]({})""".format(res[0], wiki_url)
else:
_a = sec_titles.split(""" & """)
_a = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            """--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
        )
        answers_st = [
            """{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
            for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
            if i == 0 or sc > 2
        ]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_a = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
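# To try the demo locally (assuming this script is saved as eli5_app.py and the
# model/index helpers defined earlier in the file are available), launch it with:
#
#   streamlit run eli5_app.py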
| 19 | 0 |
from __future__ import annotations
def mean(nums: list) -> float:
    """
    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError('List is empty')
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod() | 57 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
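# A small illustration (hypothetical fairseq key) of how the wildcard entries in
# MAPPING are resolved below: the layer index parsed out of the fairseq name is
# substituted for the "*" placeholder in the HF name, e.g.
#   "w2v_model.encoder.layers.3.self_attn.k_proj" -> "encoder.layers.3.attention.k_proj"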
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
            load_adapter(name, value, adapter, unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                    continue
            if not is_used:
                unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def load_adapter(full_name, value, adapter, unused_weights):
    """simple docstring"""
    name = full_name.split('''adaptor.''' )[-1]
    items = name.split('''.''' )
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
                adapter.proj_layer_norm.bias.data = value
                logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                adapter.proj_layer_norm.weight.data = value
                logger.info(F'''Adapter proj layer norm weight was initialized from {full_name}.''' )
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
                adapter.proj.bias.data = value
                logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
                adapter.proj.weight.data = value
                logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
    elif isinstance(layer_id, int ):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
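# Note: make_linear_from_emb is the usual trick for turning a (vocab, dim)
# embedding matrix into an untied LM head, e.g. (hypothetical usage):
#   lm_head = make_linear_from_emb(hf_decoder.get_input_embeddings())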
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim, ):
    """simple docstring"""
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path )
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            '''config_yaml''': config_yaml_path,
            '''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
            '''w2v_path''': checkpoint_path,
            '''load_pretrained_decoder_from''': None,
        }, )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    recursively_load_weights_wavaveca(model.encoder, hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False )
    logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
    logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config['''pad_token_id'''] = tokenizer.pad_token_id
    config['''bos_token_id'''] = tokenizer.bos_token_id
    config['''eos_token_id'''] = tokenizer.eos_token_id
    config['''tokenizer_class'''] = '''mbart50'''
    config['''feature_extractor_type'''] = '''wav2vec2'''
    config['''decoder_start_token_id'''] = tokenizer.eos_token_id
    config['''forced_bos_token_id'''] = 25_00_04
    config['''forced_eos_token_id'''] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=25_0004, type=int, help="""`decoder_start_token_id` of model config""")
_a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
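# Example invocation (all paths are hypothetical placeholders):
#
#   python convert_checkpoint.py \
#       --checkpoint_path ./wav2vec2_mbart.pt \
#       --pytorch_dump_folder_path ./hf_model \
#       --dict_path ./dict.mbart50.txt \
#       --config_yaml_path ./config.yaml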
| 19 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class _lowerCAmelCase ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
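    # For a pair (A, B) the segment ids produced above look like:
    #   tokens:   [CLS] A ... A [SEP] B ... B [SEP]
    #   type ids:   0   0 ... 0   0   1 ... 1   1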
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 58 |
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """simple docstring"""
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs )
    if save_path is not None:
        save_json(metrics, save_path, indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
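# Example (hypothetical file names), via the fire CLI exposed above:
#
#   python rouge_cli.py predicted_summaries.txt gold_summaries.txt --save_path rouge.json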
| 19 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    '''simple docstring'''
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) ->Optional[int]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self) ->int:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config , input_ids , decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self , config , inputs_dict) ->Optional[Any]:
        '''simple docstring'''
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1)
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1E-3)
def prepare_blenderbot_small_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
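    # All-ones head masks mean "keep every attention head"; tests that probe
    # head masking pass explicit masks instead of relying on these defaults.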
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotSmallModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self) ->Optional[int]:
        '''simple docstring'''
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self , config_class=BlenderbotSmallConfig)
    def test_config(self) ->Union[str, Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self) ->Any:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbotSmallModelIntegrationTests( unittest.TestCase ):
    '''simple docstring'''
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"
    @cached_property
    def tokenizer(self) ->Tuple:
        '''simple docstring'''
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
    @cached_property
    def model(self) ->Optional[Any]:
        '''simple docstring'''
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model
    @slow
    def test_90_generation_from_long_input(self) ->List[str]:
        '''simple docstring'''
        model_inputs = self.tokenizer(self.src_text , return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 59 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs) -> Union[str, Any]:
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(image_processor , tokenizer)
    def __call__( self , text=None , visual_prompt=None , images=None , return_tensors=None , **kwargs) -> Tuple:
        '''simple docstring'''
        if text is None and visual_prompt is None and images is None:
            raise ValueError('''You have to specify either text, visual prompt or images.''')
        if text is not None and visual_prompt is not None:
            raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''')
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs)
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs)
        if visual_prompt is not None and images is not None:
            encoding = {
                '''pixel_values''': image_features.pixel_values,
                '''conditional_pixel_values''': prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                '''conditional_pixel_values''': prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors)
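    # In short, __call__ returns: text + images -> token ids plus pixel_values;
    # visual prompt + images -> pixel_values plus conditional_pixel_values;
    # text only -> token ids; visual prompt only -> conditional_pixel_values;
    # images only -> a BatchEncoding of pixel_values.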
    def batch_decode( self , *args , **kwargs) -> Any:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs) -> List[str]:
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs)
    @property
    def feature_extractor_class( self) -> List[str]:
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self) -> List[str]:
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 19 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class __lowerCAmelCase ( PretrainedConfig ):
    model_type = '''falcon'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__(self , vocab_size=6_5024 , hidden_size=4544 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , ) -> Optional[int]:
        '''simple docstring'''
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('''n_embed''' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def head_dim(self ) -> int:
        '''simple docstring'''
        return self.hidden_size // self.num_attention_heads
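    # e.g. with the falcon-7b defaults above: 4544 // 71 == 64-dimensional heads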
    @property
    def rotary(self ) -> bool:
        '''simple docstring'''
        return not self.alibi
| 60 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , type_sequence_label_size=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , num_labels=3 , scope=None , ) -> Optional[Any]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self) -> List[str]:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config( self) -> Any:
        '''simple docstring'''
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def get_config( self) -> Optional[int]:
        '''simple docstring'''
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=2_55 , num_labels=self.num_labels , )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels) -> Optional[Any]:
        '''simple docstring'''
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common( self) -> Dict:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UperNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp( self) -> Union[str, Any]:
        '''simple docstring'''
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=37)
    def test_config( self) -> Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self) -> List[Any]:
        '''simple docstring'''
        return
    def test_forward_signature( self) -> str:
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_for_semantic_segmentation( self) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason='''UperNet does not use inputs_embeds''')
    def test_inputs_embeds( self) -> Any:
        '''simple docstring'''
        pass
    @unittest.skip(reason='''UperNet does not support input and output embeddings''')
    def test_model_common_attributes( self) -> List[Any]:
        '''simple docstring'''
        pass
    @unittest.skip(reason='''UperNet does not have a base model''')
    def test_save_load_fast_init_from_base( self) -> Tuple:
        '''simple docstring'''
        pass
    @unittest.skip(reason='''UperNet does not have a base model''')
    def test_save_load_fast_init_to_base( self) -> int:
        '''simple docstring'''
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
    def test_multi_gpu_data_parallel_forward( self) -> Union[str, Any]:
        '''simple docstring'''
        pass
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small( self) -> Any:
        '''simple docstring'''
        pass
    def test_hidden_states_output( self) -> List[Any]:
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states) , expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
    def test_initialization( self) -> Optional[Any]:
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    @unittest.skip(reason='''UperNet does not have tied weights''')
    def test_tied_model_weights_key_ignore( self) -> Optional[int]:
        '''simple docstring'''
        pass
    @slow
    def test_model_from_pretrained( self) -> Optional[Any]:
        '''simple docstring'''
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img() -> int:
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''', repo_type='''dataset''', filename='''ADE_val_00000001.jpg''' )
    image = Image.open(filepath ).convert('''RGB''' )
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest( unittest.TestCase ):
    def test_inference_swin_backbone( self) -> Optional[int]:
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''')
        model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''').to(torch_device)
        image = prepare_img()
        inputs = processor(images=image , return_tensors='''pt''').to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 5_12, 5_12))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4))
    def test_inference_convnext_backbone( self) -> List[Any]:
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''')
        model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''').to(torch_device)
        image = prepare_img()
        inputs = processor(images=image , return_tensors='''pt''').to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 5_12, 5_12))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4))
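# Note: both integration checks above compare only the top-left 3x3 patch of the
# logits against hard-coded reference values, which keeps the slow tests cheap
# while still catching numerical drift.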
| 19 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script" , type=str , help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ) , )
    # rest from the training program
    parser.add_argument("training_script_args" , nargs=REMAINDER )
    return parser.parse_args()
def main():
    """simple docstring"""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
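# Example (hypothetical script path and arguments):
#
#   python xla_spawn.py --num_cores 8 ./run_glue.py --model_name_or_path bert-base-cased ...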
| 61 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config( self , **kwargs) -> Union[str, Any]:
        '''simple docstring'''
        config = {
            '''num_train_timesteps''': 10_00,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps( self) -> Dict:
        '''simple docstring'''
        for timesteps in [1, 5, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas( self) -> int:
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end)
    def test_schedules( self) -> Tuple:
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type( self) -> List[str]:
        '''simple docstring'''
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample( self) -> Dict:
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding( self) -> Optional[int]:
        '''simple docstring'''
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_prediction_type( self) -> Optional[int]:
        '''simple docstring'''
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices( self) -> Optional[Any]:
        '''simple docstring'''
        for t in [0, 5_00, 9_99]:
            self.check_over_forward(time_step=t)
    def test_variance( self) -> Tuple:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.0_0979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.02)) < 1e-5
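    # The reference values above follow the DDPM posterior variance
    # beta_t * (1 - alphabar_{t-1}) / (1 - alphabar_t) for the linear beta
    # schedule configured in get_scheduler_config.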
    def test_full_loop_no_noise( self) -> str:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample , t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction( self) -> str:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''')
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample , t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps( self) -> Any:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [1_00, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t)
    def test_custom_timesteps_increasing_order( self) -> Dict:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [1_00, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.'''):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self) -> List[str]:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [1_00, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps)
    def test_custom_timesteps_too_large( self) -> Tuple:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
            scheduler.set_timesteps(timesteps=timesteps)
| 19 | 0 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("""DataClass""", Any)
DataClassType = NewType("""DataClassType""", Any)
def string_to_bool(v):
    """simple docstring"""
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
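# e.g. string_to_bool("YES") -> True, string_to_bool("0") -> False,
# string_to_bool(True) -> True (booleans pass straight through)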
def make_choice_type_function(choices):
    """simple docstring"""
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg , arg )
def HfArg( *,
    aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , ):
    """simple docstring"""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['''aliases'''] = aliases
    if help is not None:
        metadata['''help'''] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
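# Hypothetical usage sketch of the HfArg helper above inside a dataclass:
#
#   @dataclasses.dataclass
#   class TrainingArgs:
#       lr: float = HfArg(default=5e-5, help="learning rate", aliases=["--learning_rate"])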
class SCREAMING_SNAKE_CASE ( ArgumentParser ):
    '''simple docstring'''
    dataclass_types : Iterable[DataClassType]
    def __init__( self , dataclass_types : Union[DataClassType, Iterable[DataClassType]] , **kwargs ):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs['''formatter_class'''] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
@staticmethod
    def _parse_dataclass_field( parser : ArgumentParser , field : dataclasses.Field ):
        field_name = f'''--{field.name}'''
        kwargs = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , UpperCAmelCase_ ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("aliases" , [] )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : str = [aliases]
SCREAMING_SNAKE_CASE : Dict = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(UpperCAmelCase_ , "UnionType" ) and isinstance(UpperCAmelCase_ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(UpperCAmelCase_ ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
f''' Problem encountered in field \'{field.name}\'.''' )
if type(UpperCAmelCase_ ) not in field.type.__args__:
# filter `str` in Union
SCREAMING_SNAKE_CASE : List[str] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
SCREAMING_SNAKE_CASE : Tuple = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
SCREAMING_SNAKE_CASE : Dict = (
field.type.__args__[0] if isinstance(UpperCAmelCase_ , field.type.__args__[1] ) else field.type.__args__[1]
)
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
SCREAMING_SNAKE_CASE : Dict = {}
if origin_type is Literal or (isinstance(field.type , UpperCAmelCase_ ) and issubclass(field.type , UpperCAmelCase_ )):
if origin_type is Literal:
SCREAMING_SNAKE_CASE : Any = field.type.__args__
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = [x.value for x in field.type]
SCREAMING_SNAKE_CASE : Tuple = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE : Any = field.default
else:
SCREAMING_SNAKE_CASE : str = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
SCREAMING_SNAKE_CASE : List[str] = copy(UpperCAmelCase_ )
# Hack because type=bool in argparse does not behave as we want.
SCREAMING_SNAKE_CASE : Any = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # The default value is False when the field is of type bool and has no explicit default.
SCREAMING_SNAKE_CASE : str = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
SCREAMING_SNAKE_CASE : Optional[int] = default
# This tells argparse we accept 0 or 1 value after --field_name
SCREAMING_SNAKE_CASE : Tuple = "?"
# This is the value that will get picked if we do --field_name (without value)
SCREAMING_SNAKE_CASE : List[Any] = True
elif isclass(UpperCAmelCase_ ) and issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : List[str] = field.type.__args__[0]
SCREAMING_SNAKE_CASE : List[str] = "+"
if field.default_factory is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE : Any = field.default_factory()
elif field.default is dataclasses.MISSING:
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
SCREAMING_SNAKE_CASE : Optional[Any] = field.type
if field.default is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE : str = field.default
elif field.default_factory is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE : Union[str, Any] = field.default_factory()
else:
SCREAMING_SNAKE_CASE : Tuple = True
parser.add_argument(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
SCREAMING_SNAKE_CASE : Tuple = False
parser.add_argument(f'''--no_{field.name}''' , action="store_false" , dest=field.name , **UpperCAmelCase_ )
def _A ( self : int , UpperCAmelCase_ : DataClassType ):
if hasattr(UpperCAmelCase_ , "_argument_group_name" ):
SCREAMING_SNAKE_CASE : Tuple = self.add_argument_group(dtype._argument_group_name )
else:
SCREAMING_SNAKE_CASE : Dict = self
try:
SCREAMING_SNAKE_CASE : Dict[str, type] = get_type_hints(UpperCAmelCase_ )
except NameError:
raise RuntimeError(
f'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = ".".join(map(UpperCAmelCase_ , sys.version_info[:3] ) )
raise RuntimeError(
f'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(UpperCAmelCase_ ):
if not field.init:
continue
SCREAMING_SNAKE_CASE : Tuple = type_hints[field.name]
self._parse_dataclass_field(UpperCAmelCase_ , UpperCAmelCase_ )
def _A ( self : List[Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : str=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
SCREAMING_SNAKE_CASE : Tuple = []
if args_filename:
args_files.append(Path(UpperCAmelCase_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
SCREAMING_SNAKE_CASE : Dict = ArgumentParser()
args_file_parser.add_argument(UpperCAmelCase_ , type=UpperCAmelCase_ , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = args_file_parser.parse_known_args(args=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = vars(UpperCAmelCase_ ).get(args_file_flag.lstrip("-" ) , UpperCAmelCase_ )
if cmd_args_file_paths:
args_files.extend([Path(UpperCAmelCase_ ) for p in cmd_args_file_paths] )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
SCREAMING_SNAKE_CASE : Any = file_args + args if args is not None else file_args + sys.argv[1:]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.parse_known_args(args=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = []
for dtype in self.dataclass_types:
SCREAMING_SNAKE_CASE : Tuple = {f.name for f in dataclasses.fields(UpperCAmelCase_ ) if f.init}
SCREAMING_SNAKE_CASE : Dict = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k in keys}
for k in keys:
delattr(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = dtype(**UpperCAmelCase_ )
outputs.append(UpperCAmelCase_ )
if len(namespace.__dict__ ) > 0:
            # leftover attributes that don't belong to any dataclass form an additional namespace.
outputs.append(UpperCAmelCase_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
return (*outputs,)
def _A ( self : Optional[Any] , UpperCAmelCase_ : Dict[str, Any] , UpperCAmelCase_ : bool = False ):
SCREAMING_SNAKE_CASE : Tuple = set(args.keys() )
SCREAMING_SNAKE_CASE : Tuple = []
for dtype in self.dataclass_types:
SCREAMING_SNAKE_CASE : Optional[Any] = {f.name for f in dataclasses.fields(UpperCAmelCase_ ) if f.init}
SCREAMING_SNAKE_CASE : Dict = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
SCREAMING_SNAKE_CASE : Tuple = dtype(**UpperCAmelCase_ )
outputs.append(UpperCAmelCase_ )
if not allow_extra_keys and unused_keys:
raise ValueError(f'''Some keys are not used by the HfArgumentParser: {sorted(UpperCAmelCase_ )}''' )
return tuple(UpperCAmelCase_ )
def _A ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ):
with open(Path(UpperCAmelCase_ ) , encoding="utf-8" ) as open_json_file:
SCREAMING_SNAKE_CASE : Dict = json.loads(open_json_file.read() )
SCREAMING_SNAKE_CASE : Dict = self.parse_dict(UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def _A ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ):
SCREAMING_SNAKE_CASE : Any = self.parse_dict(yaml.safe_load(Path(UpperCAmelCase_ ).read_text() ) , allow_extra_keys=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
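# ----------------------------------------------------------------------------
# Added usage sketch (not part of the original file). The class above appears to
# be an obfuscated copy of transformers.HfArgumentParser, so this example targets
# the real, documented API; the dataclass and its field names are hypothetical.
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class ExampleTrainingArgs:
    # The `help` metadata is rendered by --help via ArgumentDefaultsHelpFormatter.
    learning_rate: float = field(default=5e-5, metadata={"help": "Peak learning rate."})
    fp16: bool = field(default=False, metadata={"help": "Use mixed precision."})

if __name__ == "__main__":
    example_parser = HfArgumentParser(ExampleTrainingArgs)
    # Returns one dataclass instance per registered type; unknown CLI args raise
    # unless return_remaining_strings=True. parse_dict / parse_json_file /
    # parse_yaml_file offer the same mapping from non-CLI sources.
    (example_args,) = example_parser.parse_args_into_dataclasses()
    print(example_args)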
| 62 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
_a = 100
_a = set(range(3, NUM_PRIMES, 2))
primes.add(2)
_a = 42
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
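# Added note: the loop above is a Sieve of Eratosthenes pass -- each prime up to
# sqrt(NUM_PRIMES) knocks out its multiples, so `primes` ends up holding only primes.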
@lru_cache(maxsize=1_00 )
def lowerCamelCase__ ( __snake_case ) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
_UpperCamelCase = set()
_UpperCamelCase = 42
_UpperCamelCase = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def lowerCamelCase__ ( __snake_case = 50_00 ) -> int | None:
"""simple docstring"""
for number_to_partition in range(1, __snake_case ):
if len(partition(__snake_case ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 19 | 0 |
from __future__ import annotations
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : int ):
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
__UpperCAmelCase : int = number_of_bytes // partitions
__UpperCAmelCase : str = []
for i in range(__lowerCamelCase ):
__UpperCAmelCase : Optional[Any] = i * bytes_per_partition + 1
__UpperCAmelCase : Any = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f"""{start_bytes}-{end_bytes}""" )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
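# Added illustration: with number_of_bytes=100 and partitions=4 the function above
# returns ['1-25', '26-50', '51-75', '76-100']; with 10 bytes and 3 partitions the
# last range absorbs the remainder, giving ['1-3', '4-6', '7-10'].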
| 63 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> np.array:
"""simple docstring"""
_UpperCamelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCamelCase = np.zeros((n + 1,) )
_UpperCamelCase = ya
_UpperCamelCase = xa
for k in range(__snake_case ):
_UpperCamelCase = y[k] + step_size * ode_func(__snake_case, y[k] )
_UpperCamelCase = y[k] + (
(step_size / 2) * (ode_func(__snake_case, y[k] ) + ode_func(x + step_size, __snake_case ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
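# Added note: the solver above is Heun's method (the explicit trapezoidal rule) --
# an Euler predictor y* = y_k + h*f(x_k, y_k) followed by the corrector
# y_{k+1} = y_k + (h/2) * (f(x_k, y_k) + f(x_k + h, y*)), which is second-order accurate.
# Illustrative call, assuming the argument order (ode_func, y0, x0, x_end, step_size)
# implied by the body: solving y' = y from y(0)=1 to x=1 should give y[-1] close to e.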
| 19 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowercase_ : Tuple = 'pt'
elif is_tf_available():
lowercase_ : int = 'tf'
else:
lowercase_ : Optional[int] = 'jax'
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
__a = PerceiverTokenizer
__a = False
def UpperCamelCase_ ( self ) -> Optional[Any]:
super().setUp()
SCREAMING_SNAKE_CASE__: str= PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase_ ( self ) -> int:
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def UpperCamelCase_ ( self , **lowerCAmelCase ) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=20 , lowerCAmelCase=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
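        # (Added context: the Perceiver tokenizer operates on raw UTF-8 bytes, offset by its
        # special tokens, so a single ID can be a continuation byte that cannot be decoded on
        # its own -- see the byte-level IDs asserted in the tests below.)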
SCREAMING_SNAKE_CASE__: Dict= []
for i in range(len(lowerCAmelCase ) ):
try:
SCREAMING_SNAKE_CASE__: str= tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
SCREAMING_SNAKE_CASE__: Optional[Any]= list(filter(lambda lowerCAmelCase : re.match(r'''^[ a-zA-Z]+$''' , t[1] ) , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__: Optional[int]= list(filter(lambda lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCAmelCase ) , lowerCAmelCase ) )
if max_length is not None and len(lowerCAmelCase ) > max_length:
SCREAMING_SNAKE_CASE__: List[Any]= toks[:max_length]
if min_length is not None and len(lowerCAmelCase ) < min_length and len(lowerCAmelCase ) > 0:
while len(lowerCAmelCase ) < min_length:
SCREAMING_SNAKE_CASE__: str= toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE__: Any= [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE__: List[Any]= tokenizer.decode(lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
if " " not in output_txt and len(lowerCAmelCase ) > 1:
SCREAMING_SNAKE_CASE__: Union[str, Any]= (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCAmelCase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCAmelCase )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE__: Optional[Any]= ''' ''' + output_txt
SCREAMING_SNAKE_CASE__: str= tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
return output_txt, output_ids
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.perceiver_tokenizer
SCREAMING_SNAKE_CASE__: int= '''Unicode €.'''
SCREAMING_SNAKE_CASE__: Dict= tokenizer(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['''input_ids'''] , lowerCAmelCase )
# decoding
SCREAMING_SNAKE_CASE__: int= tokenizer.decode(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , '''[CLS]Unicode €.[SEP]''' )
SCREAMING_SNAKE_CASE__: Union[str, Any]= tokenizer('''e è é ê ë''' )
SCREAMING_SNAKE_CASE__: str= [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['''input_ids'''] , lowerCAmelCase )
# decoding
SCREAMING_SNAKE_CASE__: str= tokenizer.decode(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Dict= self.perceiver_tokenizer
SCREAMING_SNAKE_CASE__: Optional[int]= ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
SCREAMING_SNAKE_CASE__: str= [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
SCREAMING_SNAKE_CASE__: Any= tokenizer(lowerCAmelCase , padding=lowerCAmelCase , return_tensors=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE__: List[str]= list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE__: Union[str, Any]= list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Dict= self.perceiver_tokenizer
SCREAMING_SNAKE_CASE__: str= ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE__: Any= tokenizer(lowerCAmelCase , padding=lowerCAmelCase , return_tensors=lowerCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , lowerCAmelCase )
self.assertIn('''attention_mask''' , lowerCAmelCase )
self.assertNotIn('''decoder_input_ids''' , lowerCAmelCase )
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase )
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: Any= self.perceiver_tokenizer
SCREAMING_SNAKE_CASE__: Dict= [
'''Summary of the text.''',
'''Another summary.''',
]
SCREAMING_SNAKE_CASE__: List[str]= tokenizer(
text_target=lowerCAmelCase , max_length=32 , padding='''max_length''' , truncation=lowerCAmelCase , return_tensors=lowerCAmelCase )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def UpperCamelCase_ ( self ) -> Tuple:
# safety check on max_len default value so we are sure the test works
SCREAMING_SNAKE_CASE__: Tuple= self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE__: Any= self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE__: Dict= tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__: int= ''' He is very happy, UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE__: str= tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
tokenizer.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Union[str, Any]= tokenizer.__class__.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= after_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
shutil.rmtree(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE__: List[str]= tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__: List[Any]= ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
SCREAMING_SNAKE_CASE__: str= tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
SCREAMING_SNAKE_CASE__: List[Any]= tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
tokenizer.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= tokenizer.__class__.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= after_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE__: Dict= tokenizer.__class__.from_pretrained(lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Optional[int]= []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
SCREAMING_SNAKE_CASE__: Optional[int]= json.load(lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
SCREAMING_SNAKE_CASE__: List[Any]= json.load(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= [f'<extra_id_{i}>' for i in range(125 )]
SCREAMING_SNAKE_CASE__: Dict= added_tokens_extra_ids + [
'''an_additional_special_token'''
]
SCREAMING_SNAKE_CASE__: List[str]= added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(lowerCAmelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCAmelCase , lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCAmelCase , lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE__: Optional[Any]= tokenizer_class.from_pretrained(
lowerCAmelCase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE__: Optional[int]= added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowerCAmelCase )]
SCREAMING_SNAKE_CASE__: int= tokenizer_class.from_pretrained(
lowerCAmelCase , additional_special_tokens=lowerCAmelCase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '''�''' )
def UpperCamelCase_ ( self ) -> Optional[int]:
pass
def UpperCamelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCamelCase_ ( self ) -> Any:
pass
def UpperCamelCase_ ( self ) -> Optional[Any]:
pass
def UpperCamelCase_ ( self ) -> str:
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept
        # one-character strings and special added tokens as tokens
SCREAMING_SNAKE_CASE__: List[str]= self.get_tokenizers(fast=lowerCAmelCase , do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
SCREAMING_SNAKE_CASE__: List[Any]= ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
SCREAMING_SNAKE_CASE__: Dict= tokenizer.convert_tokens_to_string(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
| 64 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_a = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_a = parser.parse_args()
if args.model_type == "bert":
_a = BertForMaskedLM.from_pretrained(args.model_name)
_a = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_a = model.state_dict()
_a = {}
for w in ["word_embeddings", "position_embeddings"]:
_a = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_a = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
_a = 0
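    # Added note: six of the twelve teacher layers (0, 2, 4, 7, 9, 11) are copied into
    # consecutive student layers -- the usual layer-selection scheme for warm-starting a
    # distilled (DistilBERT-style) student.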
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_a = state_dict["""cls.predictions.decoder.weight"""]
_a = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_a = state_dict[F"""cls.predictions.transform.dense.{w}"""]
_a = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 19 | 0 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if isinstance(__UpperCamelCase , torch.Tensor ):
return image
elif isinstance(__UpperCamelCase , PIL.Image.Image ):
UpperCAmelCase__ : List[Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
UpperCAmelCase__ : Optional[int] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
UpperCAmelCase__ : Union[str, Any] = np.concatenate(__UpperCamelCase , axis=0 )
UpperCAmelCase__ : str = np.array(__UpperCamelCase ).astype(np.floataa ) / 255.0
UpperCAmelCase__ : List[str] = image.transpose(0 , 3 , 1 , 2 )
UpperCAmelCase__ : Optional[int] = 2.0 * image - 1.0
UpperCAmelCase__ : List[Any] = torch.from_numpy(__UpperCamelCase )
elif isinstance(image[0] , torch.Tensor ):
UpperCAmelCase__ : List[Any] = torch.cat(__UpperCamelCase , dim=0 )
return image
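# Added note: the function below is spherical linear interpolation (slerp) between two
# tensors, falling back to plain linear interpolation when they are nearly collinear
# (|cosine| > DOT_THRESHOLD).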
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=0.9995 ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , np.ndarray ):
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Optional[int] = va.device
UpperCAmelCase__ : Tuple = va.cpu().numpy()
UpperCAmelCase__ : Optional[int] = va.cpu().numpy()
UpperCAmelCase__ : Dict = np.sum(va * va / (np.linalg.norm(__UpperCamelCase ) * np.linalg.norm(__UpperCamelCase )) )
if np.abs(__UpperCamelCase ) > DOT_THRESHOLD:
UpperCAmelCase__ : Tuple = (1 - t) * va + t * va
else:
UpperCAmelCase__ : str = np.arccos(__UpperCamelCase )
UpperCAmelCase__ : int = np.sin(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = theta_a * t
UpperCAmelCase__ : int = np.sin(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = np.sin(theta_a - theta_t ) / sin_theta_a
UpperCAmelCase__ : Tuple = sin_theta_t / sin_theta_a
UpperCAmelCase__ : List[Any] = sa * va + sa * va
if inputs_are_torch:
UpperCAmelCase__ : Dict = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase )
return va
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : str = F.normalize(__UpperCamelCase , dim=-1 )
UpperCAmelCase__ : Union[str, Any] = F.normalize(__UpperCamelCase , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
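# Added note: the expression above is the squared geodesic distance on the unit sphere --
# for unit vectors ||x - y|| = 2*sin(theta/2), so 2*arcsin(||x - y||/2)**2 = theta**2 / 2.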
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
for param in model.parameters():
UpperCAmelCase__ : Any = value
class __lowercase ( __lowerCamelCase ):
def __init__( self : Tuple ,A : AutoencoderKL ,A : CLIPTextModel ,A : CLIPModel ,A : CLIPTokenizer ,A : UNetaDConditionModel ,A : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] ,A : CLIPFeatureExtractor ,A : Optional[Any]=None ,A : Union[str, Any]=None ,A : str=None ,):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=A ,text_encoder=A ,clip_model=A ,tokenizer=A ,unet=A ,scheduler=A ,feature_extractor=A ,coca_model=A ,coca_tokenizer=A ,coca_transform=A ,)
UpperCAmelCase__ : List[Any] = (
feature_extractor.size
if isinstance(feature_extractor.size ,A )
else feature_extractor.size["""shortest_edge"""]
)
UpperCAmelCase__ : str = transforms.Normalize(mean=feature_extractor.image_mean ,std=feature_extractor.image_std )
set_requires_grad(self.text_encoder ,A )
set_requires_grad(self.clip_model ,A )
def __lowercase ( self : Optional[int] ,A : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase__ : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def __lowercase ( self : int ):
'''simple docstring'''
self.enable_attention_slicing(A )
def __lowercase ( self : List[str] ):
'''simple docstring'''
set_requires_grad(self.vae ,A )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
set_requires_grad(self.vae ,A )
def __lowercase ( self : List[str] ):
'''simple docstring'''
set_requires_grad(self.unet ,A )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
set_requires_grad(self.unet ,A )
def __lowercase ( self : Dict ,A : str ,A : List[Any] ,A : int ):
'''simple docstring'''
# get the original timestep using init_timestep
UpperCAmelCase__ : Any = min(int(num_inference_steps * strength ) ,A )
UpperCAmelCase__ : List[Any] = max(num_inference_steps - init_timestep ,0 )
UpperCAmelCase__ : Any = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __lowercase ( self : str ,A : Optional[int] ,A : Dict ,A : int ,A : Optional[int] ,A : Optional[Any] ,A : int=None ):
'''simple docstring'''
if not isinstance(A ,torch.Tensor ):
raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(A )}" )
UpperCAmelCase__ : int = image.to(device=A ,dtype=A )
if isinstance(A ,A ):
UpperCAmelCase__ : List[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(A )
]
UpperCAmelCase__ : Union[str, Any] = torch.cat(A ,dim=0 )
else:
UpperCAmelCase__ : List[Any] = self.vae.encode(A ).latent_dist.sample(A )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCAmelCase__ : Any = 0.1_8_2_1_5 * init_latents
UpperCAmelCase__ : Tuple = init_latents.repeat_interleave(A ,dim=0 )
UpperCAmelCase__ : Any = randn_tensor(init_latents.shape ,generator=A ,device=A ,dtype=A )
# get latents
UpperCAmelCase__ : Optional[Any] = self.scheduler.add_noise(A ,A ,A )
UpperCAmelCase__ : Union[str, Any] = init_latents
return latents
def __lowercase ( self : List[Any] ,A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.coca_transform(A ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
UpperCAmelCase__ : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device ,dtype=self.coca_model.dtype ) )
UpperCAmelCase__ : str = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" ,"""""" ).rstrip(""" .,""" )
def __lowercase ( self : str ,A : List[str] ,A : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.feature_extractor.preprocess(A )
UpperCAmelCase__ : List[Any] = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
UpperCAmelCase__ : Optional[Any] = self.clip_model.get_image_features(A )
UpperCAmelCase__ : Dict = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=A )
UpperCAmelCase__ : Tuple = image_embeddings_clip.repeat_interleave(A ,dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __lowercase ( self : Any ,A : List[Any] ,A : List[Any] ,A : int ,A : int ,A : int ,A : List[str] ,A : Optional[int] ,):
'''simple docstring'''
UpperCAmelCase__ : Tuple = latents.detach().requires_grad_()
UpperCAmelCase__ : Tuple = self.scheduler.scale_model_input(A ,A )
# predict the noise residual
UpperCAmelCase__ : List[Any] = self.unet(A ,A ,encoder_hidden_states=A ).sample
if isinstance(self.scheduler ,(PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
UpperCAmelCase__ : str = self.scheduler.alphas_cumprod[timestep]
UpperCAmelCase__ : Any = 1 - alpha_prod_t
            # compute the predicted original sample from the predicted noise, also called
            # "predicted x_0" in formula (12) of https://arxiv.org/pdf/2010.02502.pdf
UpperCAmelCase__ : Dict = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
UpperCAmelCase__ : int = torch.sqrt(A )
UpperCAmelCase__ : List[Any] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler ,A ):
UpperCAmelCase__ : List[Any] = self.scheduler.sigmas[index]
UpperCAmelCase__ : Any = latents - sigma * noise_pred
else:
raise ValueError(f"scheduler type {type(self.scheduler )} not supported" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCAmelCase__ : List[Any] = 1 / 0.1_8_2_1_5 * sample
UpperCAmelCase__ : Union[str, Any] = self.vae.decode(A ).sample
UpperCAmelCase__ : Optional[int] = (image / 2 + 0.5).clamp(0 ,1 )
UpperCAmelCase__ : Tuple = transforms.Resize(self.feature_extractor_size )(A )
UpperCAmelCase__ : List[Any] = self.normalize(A ).to(latents.dtype )
UpperCAmelCase__ : Union[str, Any] = self.clip_model.get_image_features(A )
UpperCAmelCase__ : Optional[int] = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=A )
UpperCAmelCase__ : Union[str, Any] = spherical_dist_loss(A ,A ).mean() * clip_guidance_scale
UpperCAmelCase__ : List[Any] = -torch.autograd.grad(A ,A )[0]
if isinstance(self.scheduler ,A ):
UpperCAmelCase__ : List[str] = latents.detach() + grads * (sigma**2)
UpperCAmelCase__ : Optional[Any] = noise_pred_original
else:
UpperCAmelCase__ : Tuple = noise_pred_original - torch.sqrt(A ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Dict ,A : Union[torch.FloatTensor, PIL.Image.Image] ,A : Union[torch.FloatTensor, PIL.Image.Image] ,A : Optional[str] = None ,A : Optional[str] = None ,A : Optional[int] = 512 ,A : Optional[int] = 512 ,A : float = 0.6 ,A : Optional[int] = 50 ,A : Optional[float] = 7.5 ,A : Optional[int] = 1 ,A : float = 0.0 ,A : Optional[float] = 100 ,A : Optional[torch.Generator] = None ,A : Optional[str] = "pil" ,A : bool = True ,A : float = 0.8 ,A : float = 0.1 ,A : float = 0.1 ,):
'''simple docstring'''
if isinstance(A ,A ) and len(A ) != batch_size:
raise ValueError(f"You have passed {batch_size} batch_size, but only {len(A )} generators." )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if isinstance(A ,torch.Generator ) and batch_size > 1:
UpperCAmelCase__ : int = [generator] + [None] * (batch_size - 1)
UpperCAmelCase__ : Union[str, Any] = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
UpperCAmelCase__ : str = [x[0] for x in coca_is_none if x[1]]
UpperCAmelCase__ : Optional[Any] = """, """.join(A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(A ):
raise ValueError(
f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
UpperCAmelCase__ : Union[str, Any] = self.get_image_description(A )
if style_prompt is None:
if len(A ):
raise ValueError(
f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
UpperCAmelCase__ : Optional[Any] = self.get_image_description(A )
# get prompt text embeddings for content and style
UpperCAmelCase__ : Any = self.tokenizer(
A ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=A ,return_tensors="""pt""" ,)
UpperCAmelCase__ : List[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
UpperCAmelCase__ : List[str] = self.tokenizer(
A ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=A ,return_tensors="""pt""" ,)
UpperCAmelCase__ : List[str] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
UpperCAmelCase__ : Tuple = slerp(A ,A ,A )
# duplicate text embeddings for each generation per prompt
UpperCAmelCase__ : Any = text_embeddings.repeat_interleave(A ,dim=0 )
# set timesteps
UpperCAmelCase__ : List[Any] = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
UpperCAmelCase__ : Any = {}
if accepts_offset:
UpperCAmelCase__ : List[Any] = 1
self.scheduler.set_timesteps(A ,**A )
        # Some schedulers like PNDM have timesteps as arrays.
        # It's more efficient to move all timesteps to the correct device beforehand.
self.scheduler.timesteps.to(self.device )
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.get_timesteps(A ,A ,self.device )
UpperCAmelCase__ : List[str] = timesteps[:1].repeat(A )
# Preprocess image
UpperCAmelCase__ : Tuple = preprocess(A ,A ,A )
UpperCAmelCase__ : str = self.prepare_latents(
A ,A ,A ,text_embeddings.dtype ,self.device ,A )
UpperCAmelCase__ : Tuple = preprocess(A ,A ,A )
UpperCAmelCase__ : Dict = self.prepare_latents(
A ,A ,A ,text_embeddings.dtype ,self.device ,A )
UpperCAmelCase__ : int = slerp(A ,A ,A )
if clip_guidance_scale > 0:
UpperCAmelCase__ : List[Any] = self.get_clip_image_embeddings(A ,A )
UpperCAmelCase__ : Any = self.get_clip_image_embeddings(A ,A )
UpperCAmelCase__ : Optional[Any] = slerp(
A ,A ,A )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
UpperCAmelCase__ : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase__ : Dict = content_text_input.input_ids.shape[-1]
UpperCAmelCase__ : List[Any] = self.tokenizer([""""""] ,padding="""max_length""" ,max_length=A ,return_tensors="""pt""" )
UpperCAmelCase__ : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
UpperCAmelCase__ : Optional[int] = uncond_embeddings.repeat_interleave(A ,dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase__ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated on the target device
        # for 1-to-1 reproducibility of results with the CompVis implementation.
        # However, this currently doesn't work on `mps`.
UpperCAmelCase__ : Dict = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase__ : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
UpperCAmelCase__ : Union[str, Any] = torch.randn(A ,generator=A ,device="""cpu""" ,dtype=A ).to(
self.device )
else:
UpperCAmelCase__ : Optional[int] = torch.randn(A ,generator=A ,device=self.device ,dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
UpperCAmelCase__ : List[Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase__ : Optional[int] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase__ : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase__ : Optional[int] = {}
if accepts_eta:
UpperCAmelCase__ : Union[str, Any] = eta
# check if the scheduler accepts generator
UpperCAmelCase__ : str = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
UpperCAmelCase__ : Optional[int] = generator
with self.progress_bar(total=A ):
for i, t in enumerate(A ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__ : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__ : int = self.scheduler.scale_model_input(A ,A )
# predict the noise residual
UpperCAmelCase__ : Tuple = self.unet(A ,A ,encoder_hidden_states=A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = noise_pred.chunk(2 )
UpperCAmelCase__ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
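                    # i.e. eps_hat = eps_uncond + w * (eps_text - eps_uncond) with
                    # w = guidance_scale (classifier-free guidance).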
# perform clip guidance
if clip_guidance_scale > 0:
UpperCAmelCase__ : Optional[int] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.cond_fn(
A ,A ,A ,A ,A ,A ,A ,)
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__ : List[Any] = self.scheduler.step(A ,A ,A ,**A ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCAmelCase__ : Any = 1 / 0.1_8_2_1_5 * latents
UpperCAmelCase__ : int = self.vae.decode(A ).sample
UpperCAmelCase__ : Any = (image / 2 + 0.5).clamp(0 ,1 )
UpperCAmelCase__ : Optional[Any] = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
UpperCAmelCase__ : int = self.numpy_to_pil(A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=A ,nsfw_content_detected=A )
| 65 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_a = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class _UpperCAmelCase:
lowercase__ = PegasusConfig
lowercase__ = {}
lowercase__ = 'gelu'
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ) -> int:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = eos_token_id
_UpperCamelCase = pad_token_id
_UpperCamelCase = bos_token_id
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size)
_UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1)
_UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCamelCase = prepare_pegasus_inputs_dict(__a , __a , __a)
return config, inputs_dict
def UpperCAmelCase ( self , __a , __a , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(__a)
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''])
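        # Added note: the steps below prime the decoder cache on all but the last token, decode
        # the final token incrementally with that cache, and check the cached logits match an
        # uncached full forward pass.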
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __a , __a)
_UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''')
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''')
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , __a , decoder_attention_mask=__a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__a , )
_UpperCamelCase = model.decode(__a , __a)
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''')
def UpperCAmelCase ( self , __a , __a , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(__a)
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''])
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __a , __a)
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''')
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , __a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__a , decoder_position_ids=__a , )
_UpperCamelCase = model.decode(__a , __a , decoder_attention_mask=__a)
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''')
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=None, __snake_case=None, ) -> Union[str, Any]:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase = np.not_equal(__snake_case, config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_UpperCamelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape, dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ).astype(np.inta ),
], axis=-1, )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ):
lowercase__ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowercase__ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowercase__ = True
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = FlaxPegasusModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__a , __a , __a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__a , __a , __a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_UpperCamelCase = self._prepare_for_class(__a , __a)
_UpperCamelCase = model_class(__a)
@jax.jit
def encode_jitted(__a , __a=None , **__a):
return model.encode(input_ids=__a , attention_mask=__a)
with self.subTest('''JIT Enabled'''):
_UpperCamelCase = encode_jitted(**__a).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
_UpperCamelCase = encode_jitted(**__a).to_tuple()
self.assertEqual(len(__a) , len(__a))
for jitted_output, output in zip(__a , __a):
self.assertEqual(jitted_output.shape , output.shape)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''])
_UpperCamelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(__a , __a , __a):
return model.decode(
decoder_input_ids=__a , decoder_attention_mask=__a , encoder_outputs=__a , )
with self.subTest('''JIT Enabled'''):
_UpperCamelCase = decode_jitted(**__a).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
_UpperCamelCase = decode_jitted(**__a).to_tuple()
self.assertEqual(len(__a) , len(__a))
for jitted_output, output in zip(__a , __a):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=__a)
_UpperCamelCase = np.ones((1, 1))
_UpperCamelCase = model(__a)
self.assertIsNotNone(__a)
@slow
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''')
_UpperCamelCase = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''')
_UpperCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
_UpperCamelCase = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
_UpperCamelCase = tokenizer(__a , return_tensors='''np''' , truncation=__a , max_length=5_12 , padding=__a)
_UpperCamelCase = model.generate(**__a , num_beams=2).sequences
_UpperCamelCase = tokenizer.batch_decode(__a , skip_special_tokens=__a)
assert tgt_text == decoded
| 19 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
_lowercase : str = tempfile.mkdtemp()
# fmt: off
_lowercase : str = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
_lowercase : Any = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
_lowercase : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
_lowercase : List[str] = {'unk_token': '<unk>'}
_lowercase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowercase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_lowerCAmelCase ) )
_lowercase : int = {
'do_resize': True,
'size': 2_0,
'do_center_crop': True,
'crop_size': 1_8,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
_lowercase : str = os.path.join(self.tmpdirname , _lowerCAmelCase )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def __a ( self , **_lowerCAmelCase ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , **_lowerCAmelCase ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , **_lowerCAmelCase ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self ):
shutil.rmtree(self.tmpdirname )
def __a ( self ):
_lowercase : Dict = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_lowercase : int = [Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __a ( self ):
_lowercase : Dict = self.get_tokenizer()
_lowercase : Any = self.get_rust_tokenizer()
_lowercase : List[str] = self.get_image_processor()
_lowercase : List[Any] = CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
_lowercase : Union[str, Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCAmelCase )
_lowercase : Dict = CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
_lowercase : int = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowercase : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowercase : List[str] = self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0 )
_lowercase : List[Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
def __a ( self ):
_lowercase : Dict = self.get_image_processor()
_lowercase : Union[str, Any] = self.get_tokenizer()
_lowercase : Any = CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
_lowercase : Any = self.prepare_image_inputs()
_lowercase : int = image_processor(_lowerCAmelCase , return_tensors='np' )
_lowercase : List[Any] = processor(images=_lowerCAmelCase , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __a ( self ):
_lowercase : Any = self.get_image_processor()
_lowercase : Any = self.get_tokenizer()
_lowercase : str = CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
_lowercase : Dict = 'lower newer'
_lowercase : Any = processor(text=_lowerCAmelCase )
_lowercase : Tuple = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __a ( self ):
_lowercase : int = self.get_image_processor()
_lowercase : Union[str, Any] = self.get_tokenizer()
_lowercase : int = CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
_lowercase : int = 'lower newer'
_lowercase : Optional[Any] = self.prepare_image_inputs()
_lowercase : List[str] = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase ):
processor()
def __a ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Tuple = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
_lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowercase : int = processor.batch_decode(_lowerCAmelCase )
_lowercase : Dict = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.get_image_processor()
_lowercase : Optional[Any] = self.get_tokenizer()
_lowercase : Any = CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
_lowercase : List[Any] = 'lower newer'
_lowercase : List[str] = self.prepare_image_inputs()
_lowercase : Dict = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 66 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , __a=0 , ) -> Any:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
_UpperCamelCase = projection_dim
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
_UpperCamelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict())
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = TFDPRContextEncoder(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = TFDPRQuestionEncoder(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = TFDPRReader(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
        (
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
        ) = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowercase__ = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = TFDPRModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__a)
@slow
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRContextEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRContextEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRQuestionEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRReader.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class _UpperCAmelCase( unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''')
_UpperCamelCase = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]]) # [CLS] hello, is my dog cute? [SEP]
_UpperCamelCase = model(__a)[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_UpperCamelCase = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
])
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4))
| 19 | 0 |
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 67 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4DBF)  # CJK Unified Ideographs Extension A
        or (cp >= 0x2_0000 and cp <= 0x2_A6DF)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2_A700 and cp <= 0x2_B73F)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2_B740 and cp <= 0x2_B81F)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2_B820 and cp <= 0x2_CEAF)  # CJK Unified Ideographs Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0x2_F800 and cp <= 0x2_FA1F)  # CJK Compatibility Ideographs Supplement
    ):
        return True
    return False
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
for char in word:
_UpperCamelCase = ord(__snake_case )
if not _is_chinese_char(__snake_case ):
return 0
return 1
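# Editor's note: a minimal sanity check for the two helpers above (hypothetical
# calls, not part of the original script). "中" is U+4E2D, inside the CJK
# Unified Ideographs block, while ASCII letters fall outside every range:
#
#     _is_chinese_char(ord("中"))   # -> True
#     _is_chinese_char(ord("a"))    # -> False
#     is_chinese("中文")            # -> 1 (every character is a CJK ideograph)
#     is_chinese("word")            # -> 0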
def lowerCamelCase__ ( __snake_case ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = set()
for token in tokens:
_UpperCamelCase = len(__snake_case ) > 1 and is_chinese(__snake_case )
if chinese_word:
word_set.add(__snake_case )
_UpperCamelCase = list(__snake_case )
return word_list
def lowerCamelCase__ ( __snake_case, __snake_case ) -> int:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
_UpperCamelCase = max([len(__snake_case ) for w in chinese_word_set] )
_UpperCamelCase = bert_tokens
_UpperCamelCase , _UpperCamelCase = 0, len(__snake_case )
while start < end:
_UpperCamelCase = True
if is_chinese(bert_word[start] ):
_UpperCamelCase = min(end - start, __snake_case )
for i in range(__snake_case, 1, -1 ):
_UpperCamelCase = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
_UpperCamelCase = '''##''' + bert_word[j]
_UpperCamelCase = start + i
_UpperCamelCase = False
break
if single_word:
start += 1
return bert_word
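# Editor's note: a small worked example for the whole-word matching above
# (hypothetical inputs, not from the original file). Given
# bert_tokens = ["中", "国", "好"] and chinese_word_set = {"中国"}, the longest
# match "中国" is found starting at position 0, so the continuation token is
# rewritten and the result is ["中", "##国", "好"].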
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = []
for i in range(0, len(__snake_case ), 1_00 ):
_UpperCamelCase = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws'''] ).cws
_UpperCamelCase = [get_chinese_word(__snake_case ) for r in res]
ltp_res.extend(__snake_case )
assert len(__snake_case ) == len(__snake_case )
_UpperCamelCase = []
for i in range(0, len(__snake_case ), 1_00 ):
_UpperCamelCase = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=__snake_case, truncation=__snake_case, max_length=5_12 )
bert_res.extend(res['''input_ids'''] )
assert len(__snake_case ) == len(__snake_case )
_UpperCamelCase = []
for input_ids, chinese_word in zip(__snake_case, __snake_case ):
_UpperCamelCase = []
for id in input_ids:
_UpperCamelCase = bert_tokenizer._convert_id_to_token(__snake_case )
input_tokens.append(__snake_case )
_UpperCamelCase = add_sub_symbol(__snake_case, __snake_case )
_UpperCamelCase = []
        # We only save the positions of Chinese subwords that start with ##, meaning they are part of a whole word.
for i, token in enumerate(__snake_case ):
if token[:2] == "##":
_UpperCamelCase = token[2:]
# save chinese tokens' pos
if len(__snake_case ) == 1 and _is_chinese_char(ord(__snake_case ) ):
ref_id.append(__snake_case )
ref_ids.append(__snake_case )
assert len(__snake_case ) == len(__snake_case )
return ref_ids
def lowerCamelCase__ ( __snake_case ) -> Optional[int]:
"""simple docstring"""
with open(args.file_name, '''r''', encoding='''utf-8''' ) as f:
_UpperCamelCase = f.readlines()
_UpperCamelCase = [line.strip() for line in data if len(__snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    _UpperCamelCase = LTP(args.ltp )  # faster on a GPU device
_UpperCamelCase = BertTokenizer.from_pretrained(args.bert )
_UpperCamelCase = prepare_ref(__snake_case, __snake_case, __snake_case )
with open(args.save_path, '''w''', encoding='''utf-8''' ) as f:
_UpperCamelCase = [json.dumps(__snake_case ) + '''\n''' for ref in ref_ids]
f.writelines(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
_a = parser.parse_args()
main(args)
| 19 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Tuple ) -> List[Any]:
        # For consistency across the different places where DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only from integers.
__UpperCAmelCase =[[1, 2, 4], [1, 2, 3, 4]]
__UpperCAmelCase =DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
        # We can't have constraints that are complete subsets of one another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2], which fulfills it, but it is in the middle of potentially
        # fulfilling [1,2,3,4]. If we decided that [1,2,3] does fulfill the constraint, then the algorithm
        # would necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__UpperCAmelCase =[[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def _a ( self : int ) -> Tuple:
__UpperCAmelCase =[[1, 2, 3], [1, 2, 4]]
__UpperCAmelCase =DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(1 )
__UpperCAmelCase =stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(2 )
__UpperCAmelCase =stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(3 )
__UpperCAmelCase =stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _a ( self : Optional[Any] ) -> Optional[int]:
__UpperCAmelCase =[[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__UpperCAmelCase =DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
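# Editor's note: a condensed usage sketch of the API exercised above (the
# public DisjunctiveConstraint from transformers.generation; the token values
# are illustrative). Feeding tokens that match one branch completes the
# constraint:
#
#     dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#     for token in (1, 2, 4):
#         stepped, completed, reset = dc.update(token)
#     assert completed and dc.current_seq == [1, 2, 4]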
| 68 |
"""simple docstring"""
import heapq
def lowerCamelCase__ ( __snake_case ) -> set[int]:
"""simple docstring"""
_UpperCamelCase = []
    # For each node and its adjacency list, push the node's rank together with the node
    # onto the queue; via the heapq module the queue behaves like a priority queue.
    # heapq implements a min-priority queue, so -1 * len(v) is used to get max-first order.
for key, value in graph.items():
# O(log(n))
heapq.heappush(__snake_case, [-1 * len(__snake_case ), (key, value)] )
# chosen_vertices = set of chosen vertices
_UpperCamelCase = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_UpperCamelCase = heapq.heappop(__snake_case )[1][0]
chosen_vertices.add(__snake_case )
# Remove all arcs adjacent to argmax
for elem in queue:
        # if the node has no adjacent nodes, skip
if elem[0] == 0:
continue
        # if argmax is reachable from elem,
        # remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
_UpperCamelCase = elem[1][1].index(__snake_case )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(__snake_case )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_a = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 19 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : Union[str, Any] = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Union[str, Any] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Union[str, Any] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
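# Editor's note: the structure above is the standard lazy-import layout (the
# _LazyModule behaviour is stated here as an assumption about transformers.utils,
# not defined in this file): importing the package stays cheap, and the heavy
# framework import only runs when an attribute such as XGLMModel is first accessed.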
| 69 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCamelCase__ ( ) -> List[str]:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
_UpperCamelCase = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching, '''os.path.join''', __snake_case ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCamelCase__ ( ) -> List[str]:
"""simple docstring"""
assert _test_patching.open is open
_UpperCamelCase = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching, '''open''', __snake_case ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching, '''pandas.read_csv''', __snake_case ):
pass
def lowerCamelCase__ ( ) -> Dict:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching, '''len''', __snake_case ) is None
with patch_submodule(_test_patching, '''len''', __snake_case ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_start_and_stop_mock__'''
_UpperCamelCase = patch_submodule(_test_patching, '''open''', __snake_case )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> Optional[int]:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
_UpperCamelCase = '''__test_patch_submodule_successive_join__'''
_UpperCamelCase = '''__test_patch_submodule_successive_dirname__'''
_UpperCamelCase = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching, '''os.path.join''', __snake_case ):
with patch_submodule(_test_patching, '''os.rename''', __snake_case ):
with patch_submodule(_test_patching, '''os.path.dirname''', __snake_case ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching, '''os.rename''', __snake_case ):
with patch_submodule(_test_patching, '''os.path.join''', __snake_case ):
with patch_submodule(_test_patching, '''os.path.dirname''', __snake_case ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCamelCase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching, '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''', __snake_case ):
pass
with patch_submodule(_test_patching, '''os.__attribute_that_doesn_exist__''', __snake_case ):
pass
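# Editor's note: a condensed usage sketch of patch_submodule (assuming the
# datasets.utils.patching API exercised above; the mock string is illustrative):
#
#     mock = "__sketch_mock__"
#     with patch_submodule(_test_patching, "os.path.join", mock):
#         assert _test_patching.os.path.join is mock
#     # on exit, the original os.path.join is restored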
| 19 | 0 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowerCamelCase : Any = "hf-internal-testing/tiny-random-bert"
lowerCamelCase : Optional[int] = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
lowerCamelCase : str = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = cached_file(A_ , A_ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(A_ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(A_ , A_ ) ) )
with open(os.path.join(A_ , 'refs' , 'main' ) ) as f:
lowerCamelCase_ = f.read()
self.assertEqual(A_ , os.path.join(A_ , 'snapshots' , A_ , A_ ) )
self.assertTrue(os.path.isfile(A_ ) )
# File is cached at the same place the second time.
lowerCamelCase_ = cached_file(A_ , A_ )
self.assertEqual(A_ , A_ )
# Using a specific revision to test the full commit hash.
lowerCamelCase_ = cached_file(A_ , A_ , revision='9b8c223' )
self.assertEqual(A_ , os.path.join(A_ , 'snapshots' , A_ , A_ ) )
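    # Editor's note: the on-disk layout exercised above is stated as an
    # assumption about the Hub cache, not defined in this file:
    # models--<org>--<name>/{blobs,refs,snapshots}/, where refs/main stores the
    # commit hash and snapshots/<hash>/ holds the resolved files.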
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(A_ , 'is not a valid model identifier' ):
lowerCamelCase_ = cached_file('tiny-random-bert' , A_ )
with self.assertRaisesRegex(A_ , 'is not a valid git identifier' ):
lowerCamelCase_ = cached_file(A_ , A_ , revision='aaaa' )
with self.assertRaisesRegex(A_ , 'does not appear to have a file named' ):
lowerCamelCase_ = cached_file(A_ , 'conf' )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(A_ , 'does not appear to have a file named' ):
lowerCamelCase_ = cached_file(A_ , 'conf' )
with open(os.path.join(A_ , 'refs' , 'main' ) ) as f:
lowerCamelCase_ = f.read()
self.assertTrue(os.path.isfile(os.path.join(A_ , '.no_exist' , A_ , 'conf' ) ) )
lowerCamelCase_ = cached_file(A_ , 'conf' , _raise_exceptions_for_missing_entries=A_ )
self.assertIsNone(A_ )
lowerCamelCase_ = cached_file(A_ , 'conf' , local_files_only=A_ , _raise_exceptions_for_missing_entries=A_ )
self.assertIsNone(A_ )
lowerCamelCase_ = mock.Mock()
lowerCamelCase_ = 500
lowerCamelCase_ = {}
lowerCamelCase_ = HTTPError
lowerCamelCase_ = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
lowerCamelCase_ = cached_file(A_ , 'conf' , _raise_exceptions_for_connection_errors=A_ )
self.assertIsNone(A_ )
        # This checks that we actually called the fake head request
mock_head.assert_called()
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , A_ ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , A_ ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , A_ ) )
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(A_ , 'is not a valid model identifier' ):
get_file_from_repo('bert-base-case' , A_ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(A_ , 'is not a valid git identifier' ):
get_file_from_repo('bert-base-cased' , A_ , revision='ahaha' )
lowerCamelCase_ = get_file_from_repo('bert-base-cased' , A_ )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowerCamelCase_ = json.loads(open(A_ , 'r' ).read() )
self.assertEqual(config['hidden_size'] , 768 )
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ = Path(A_ ) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(A_ , 'a.txt' ) , str(A_ ) )
self.assertIsNone(get_file_from_repo(A_ , 'b.txt' ) )
| 70 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = original_name.split('''.''' )[0]
_UpperCamelCase = key.split('''.''' )
_UpperCamelCase = int(key_list[key_list.index(__snake_case ) - 2] )
_UpperCamelCase = int(key_list[key_list.index(__snake_case ) - 1] )
_UpperCamelCase = orig_block_num - offset
_UpperCamelCase = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''', F'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
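# Editor's note: a hypothetical example of the renaming above. With
# key = "poolformer.encoder.2.1.mlp.fc1.weight", offset = 1,
# original_name = "mlp.fc1" and new_name = "output.conv1", the block index 2
# and layer index 1 sit just before "mlp", so the key becomes
# "poolformer.encoder.block.1.1.output.conv1.weight".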
def lowerCamelCase__ ( __snake_case ) -> str:
"""simple docstring"""
_UpperCamelCase = OrderedDict()
_UpperCamelCase , _UpperCamelCase = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
_UpperCamelCase = key.replace('''network''', '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
_UpperCamelCase = key[: key.find('''proj''' )]
_UpperCamelCase = key.replace(__snake_case, F'''patch_embeddings.{total_embed_found}.''' )
_UpperCamelCase = key.replace('''proj''', '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
_UpperCamelCase = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
_UpperCamelCase = replace_key_with_offset(__snake_case, __snake_case, '''mlp.fc1''', '''output.conv1''' )
if "mlp.fc2" in key:
_UpperCamelCase = replace_key_with_offset(__snake_case, __snake_case, '''mlp.fc2''', '''output.conv2''' )
if "norm1" in key:
_UpperCamelCase = replace_key_with_offset(__snake_case, __snake_case, '''norm1''', '''before_norm''' )
if "norm2" in key:
_UpperCamelCase = replace_key_with_offset(__snake_case, __snake_case, '''norm2''', '''after_norm''' )
if "layer_scale_1" in key:
_UpperCamelCase = replace_key_with_offset(__snake_case, __snake_case, '''layer_scale_1''', '''layer_scale_1''' )
if "layer_scale_2" in key:
_UpperCamelCase = replace_key_with_offset(__snake_case, __snake_case, '''layer_scale_2''', '''layer_scale_2''' )
if "head" in key:
_UpperCamelCase = key.replace('''head''', '''classifier''' )
_UpperCamelCase = value
return new_state_dict
def lowerCamelCase__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_UpperCamelCase = Image.open(requests.get(__snake_case, stream=__snake_case ).raw )
return image
@torch.no_grad()
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> int:
"""simple docstring"""
_UpperCamelCase = PoolFormerConfig()
# set attributes based on model_name
_UpperCamelCase = '''huggingface/label-files'''
_UpperCamelCase = model_name[-3:]
_UpperCamelCase = 10_00
_UpperCamelCase = '''imagenet-1k-id2label.json'''
_UpperCamelCase = (1, 10_00)
# set config attributes
_UpperCamelCase = json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type='''dataset''' ), '''r''' ) )
_UpperCamelCase = {int(__snake_case ): v for k, v in idalabel.items()}
_UpperCamelCase = idalabel
_UpperCamelCase = {v: k for k, v in idalabel.items()}
if size == "s12":
_UpperCamelCase = [2, 2, 6, 2]
_UpperCamelCase = [64, 1_28, 3_20, 5_12]
_UpperCamelCase = 4.0
_UpperCamelCase = 0.9
elif size == "s24":
_UpperCamelCase = [4, 4, 12, 4]
_UpperCamelCase = [64, 1_28, 3_20, 5_12]
_UpperCamelCase = 4.0
_UpperCamelCase = 0.9
elif size == "s36":
_UpperCamelCase = [6, 6, 18, 6]
_UpperCamelCase = [64, 1_28, 3_20, 5_12]
_UpperCamelCase = 4.0
_UpperCamelCase = 1e-6
_UpperCamelCase = 0.9
elif size == "m36":
_UpperCamelCase = [6, 6, 18, 6]
_UpperCamelCase = [96, 1_92, 3_84, 7_68]
_UpperCamelCase = 4.0
_UpperCamelCase = 1e-6
_UpperCamelCase = 0.95
elif size == "m48":
_UpperCamelCase = [8, 8, 24, 8]
_UpperCamelCase = [96, 1_92, 3_84, 7_68]
_UpperCamelCase = 4.0
_UpperCamelCase = 1e-6
_UpperCamelCase = 0.95
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor
_UpperCamelCase = PoolFormerImageProcessor(crop_pct=__snake_case )
# Prepare image
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=__snake_case, return_tensors='''pt''' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
_UpperCamelCase = torch.load(__snake_case, map_location=torch.device('''cpu''' ) )
# rename keys
_UpperCamelCase = rename_keys(__snake_case )
# create HuggingFace model and load state dict
_UpperCamelCase = PoolFormerForImageClassification(__snake_case )
model.load_state_dict(__snake_case )
model.eval()
# Define image processor
_UpperCamelCase = PoolFormerImageProcessor(crop_pct=__snake_case )
_UpperCamelCase = image_processor(images=prepare_img(), return_tensors='''pt''' ).pixel_values
# forward pass
_UpperCamelCase = model(__snake_case )
_UpperCamelCase = outputs.logits
# define expected logit slices for different models
if size == "s12":
_UpperCamelCase = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
_UpperCamelCase = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
_UpperCamelCase = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
_UpperCamelCase = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
_UpperCamelCase = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(F'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3], __snake_case, atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
model.save_pretrained(__snake_case )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_a = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 19 | 0 |
'''simple docstring'''
from __future__ import annotations
def a__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[list[str]] , _SCREAMING_SNAKE_CASE : int , ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = len(_SCREAMING_SNAKE_CASE )
    # If row is equal to the size of the board, it means there is a queen in each row of
    # the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column in the row to find all possible placements
for col in range(_SCREAMING_SNAKE_CASE ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain this column value, because a
        # repeated column means a vertical collision. Then we apply the two diagonal
        # formulas we learned before:
        #
        # 45º:  y - x = b  or  row - col = b
        # 135º: y + x = b  or  row + col = b
        #
        # And we verify that the results of these two formulas do not already exist
        # in their respective sets (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we continue
        # to the next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If there is no collision, we call the DFS function again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : list[list[str]] = []
depth_first_search([] , [] , [] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Print all the boards
for board in boards:
for column in board:
print(_SCREAMING_SNAKE_CASE )
print("" )
print(len(_SCREAMING_SNAKE_CASE ) , "solutions were found." )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
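# Editor's note: a quick check of the two diagonal formulas above (hypothetical
# coordinates). Queens at (row=0, col=1) and (row=2, col=3) share a 45º diagonal
# because row - col is -1 for both; queens at (0, 3) and (2, 1) share a 135º
# diagonal because row + col is 3 for both.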
| 71 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = (DPMSolverSDEScheduler,)
lowercase__ = 10
def UpperCAmelCase ( self , **__a) -> int:
'''simple docstring'''
_UpperCamelCase = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**__a)
return config
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=__a , beta_end=__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for i, t in enumerate(scheduler.timesteps):
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''')
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for i, t in enumerate(scheduler.timesteps):
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps , device=__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(__a) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a , use_karras_sigmas=__a)
scheduler.set_timesteps(self.num_inference_steps , device=__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(__a) * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
| 19 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
@staticmethod
@abstractmethod
def _A( snake_case_ ):
raise NotImplementedError()
@abstractmethod
def _A( self ):
raise NotImplementedError()
| 72 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a = logging.get_logger(__name__)
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['pixel_values']
def __init__( self , __a = True , __a = None , __a = PILImageResampling.BICUBIC , __a = True , __a = True , __a = 1 / 2_55 , __a = None , __a = True , __a = None , __a = None , **__a , ) -> None:
'''simple docstring'''
super().__init__(**__a)
_UpperCamelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
_UpperCamelCase = get_size_dict(__a)
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_UpperCamelCase = get_size_dict(__a , default_to_square=__a , param_name='''crop_size''')
_UpperCamelCase = do_resize
_UpperCamelCase = do_rescale
_UpperCamelCase = do_normalize
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = rescale_factor
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase ( self , __a , __a , __a = PILImageResampling.BILINEAR , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(__a)
if "shortest_edge" in size:
_UpperCamelCase = get_resize_output_image_size(__a , size=size['''shortest_edge'''] , default_to_square=__a)
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_UpperCamelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''')
return resize(__a , size=__a , resample=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(__a)
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
return center_crop(__a , size=(size['''height'''], size['''width''']) , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a = None , **__a) -> np.ndarray:
'''simple docstring'''
return rescale(__a , scale=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> BatchFeature:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(__a , param_name='''crop_size''' , default_to_square=__a)
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(__a)
if not is_batched(__a):
_UpperCamelCase = [images]
if not valid_images(__a):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(__a) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=__a , size=__a , resample=__a) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=__a , size=__a) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=__a , scale=__a) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=__a , mean=__a , std=__a) for image in images]
_UpperCamelCase = [to_channel_dimension_format(__a , __a) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=__a , tensor_type=__a)
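# Editor's note: a hypothetical end-to-end call for the processor above (the
# image variable is illustrative): processor(images=pil_image,
# return_tensors="pt") resizes to 224x224, center-crops to 224x224, rescales by
# 1/255, normalizes with the ImageNet defaults, and returns a BatchFeature
# holding "pixel_values".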
| 19 | 0 |
from __future__ import annotations
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = sum(_UpperCAmelCase)
create_state_space_tree(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
return result
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
if sum(_UpperCAmelCase) > max_sum or (remaining_nums_sum + sum(_UpperCAmelCase)) < max_sum:
return
if sum(_UpperCAmelCase) == max_sum:
result.append(_UpperCAmelCase)
return
for index in range(_UpperCAmelCase , len(_UpperCAmelCase)):
create_state_space_tree(
_UpperCAmelCase , _UpperCAmelCase , index + 1 , [*path, nums[index]] , _UpperCAmelCase , remaining_nums_sum - nums[index] , )
a_ : Tuple = [3, 34, 4, 12, 5, 2]
a_ : str = 9
a_ : Dict = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
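# Editor's note: a hypothetical trace of the pruning above. With max_sum = 9,
# the partial path [3, 34] is abandoned immediately because its sum (37)
# already exceeds 9; a branch is likewise cut when even adding every remaining
# number cannot reach max_sum.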
| 73 |
"""simple docstring"""
# Imports
import numpy as np
class _UpperCAmelCase:
def __init__( self , __a=None , __a=None , __a=None , __a=None , __a=None) -> Dict:
'''simple docstring'''
self.set_matricies(red=__a , green=__a , blue=__a , red_edge=__a , nir=__a)
def UpperCAmelCase ( self , __a=None , __a=None , __a=None , __a=None , __a=None) -> Dict:
'''simple docstring'''
if red is not None:
_UpperCamelCase = red
if green is not None:
_UpperCamelCase = green
if blue is not None:
_UpperCamelCase = blue
if red_edge is not None:
_UpperCamelCase = red_edge
if nir is not None:
_UpperCamelCase = nir
return True
def UpperCAmelCase ( self , __a="" , __a=None , __a=None , __a=None , __a=None , __a=None) -> List[str]:
'''simple docstring'''
self.set_matricies(red=__a , green=__a , blue=__a , red_edge=__a , nir=__a)
_UpperCamelCase = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''')
return False
    def arv12(self):
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci(self):
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
    def cvi(self):
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
    def gli(self):
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
    def ndvi(self):
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
    def bndvi(self):
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi(self):
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi(self):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi(self):
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
    def grndvi(self):
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
    def rbndvi(self):
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi(self):
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
    def atsavi(self, a=0.08, b=1.22, x=0.03):
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
    def bwdrvi(self):
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green(self):
'''simple docstring'''
return (self.nir / self.green) - 1
    def ci_rededge(self):
'''simple docstring'''
return (self.nir / self.redEdge) - 1
    def ci(self):
'''simple docstring'''
return (self.red - self.blue) / self.red
    def ctvi(self):
        '''simple docstring'''
        ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
    def gdvi(self):
'''simple docstring'''
return self.nir - self.green
    def evi(self):
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
    def gemi(self):
        '''simple docstring'''
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi(self, y=0.16):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi(self, n=0.5):
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue(self):
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
    def ivi(self, a=None, b=None):
'''simple docstring'''
return (self.nir - b) / (a * self.red)
    def ipvi(self):
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i(self):
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
    def rvi(self):
'''simple docstring'''
return self.nir / self.red
    def mrvi(self):
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi(self):
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
    def norm_g(self):
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
    def norm_nir(self):
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
    def norm_r(self):
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
    def ngrdi(self):
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
    def ri(self):
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
    def s(self):
        '''simple docstring'''
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
    def _if(self):
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi(self):
'''simple docstring'''
return self.nir / self.red
    def tvi(self):
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre(self):
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
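# Illustrative usage (added example with made-up band values): NDVI is
# (nir - red) / (nir + red), computed elementwise on the input arrays.
example_red = np.array([[0.1, 0.2], [0.3, 0.4]])
example_nir = np.array([[0.5, 0.6], [0.7, 0.8]])
example_calc = IndexCalculation(red=example_red, nir=example_nir)
print(example_calc.calculation('''NDVI'''))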
| 19 | 0 |
def z_function(input_str: str) -> list[int]:
    """simple docstring"""
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """simple docstring"""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """simple docstring"""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater than the length of the pattern string
        # that means this index is a starting position of a substring
        # which is equal to the pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
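    # Illustrative checks (added, not in the original): z_function gives the
    # longest common prefix of the string with each of its suffixes, and
    # "abra" occurs twice in "abracadabra".
    assert z_function("aaaa") == [0, 3, 2, 1]
    assert find_pattern("abra", "abracadabra") == 2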
| 74 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, backbone_featmap_shape=[1, 16, 4, 4], scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        '''simple docstring'''
        backbone_config = {
            '''global_padding''': '''same''',
            '''layer_type''': '''bottleneck''',
            '''depths''': [3, 4, 9],
            '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
            '''embedding_dynamic_padding''': True,
            '''hidden_sizes''': [4, 8, 16, 32],
            '''num_groups''': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=backbone_config , )
    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass
    def test_model_common_attributes(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_initialization(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [F'''{name}.{key}''' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        '''simple docstring'''
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        '''simple docstring'''
        image_processor = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''')
        model = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''')
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], '''tabby, tabby cat''')
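# Run note (added; assumes a transformers development checkout with the test
# dependencies installed - the exact path is the usual convention, not verified here):
#   python -m pytest tests/models/vit_hybrid/test_modeling_vit_hybrid.py -k "test_model"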
| 19 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        '''simple docstring'''
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        '''simple docstring'''
        return True

    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ):
        '''simple docstring'''
        pass
    def create_state(self):
        '''simple docstring'''
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()):
        '''simple docstring'''
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps, )
    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: random.KeyArray, ):
        '''simple docstring'''
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, return_dict: bool = True, ):
        '''simple docstring'''
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def step_correct(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool = True, ):
        '''simple docstring'''
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        '''simple docstring'''
        raise NotImplementedError()
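# Rough sampling sketch (added illustration; `denoiser` and `sample` are
# hypothetical placeholders, not part of this scheduler). It shows the
# intended call order of the stochastic Karras et al. sampler:
# add_noise_to_input, then step, with step_correct available as the optional
# second-order correction while sigma_prev != 0.
#
# scheduler = FlaxKarrasVeScheduler()
# state = scheduler.create_state()
# state = scheduler.set_timesteps(state, num_inference_steps=50)
# key = random.PRNGKey(0)
# for sigma, sigma_prev in zip(state.schedule[:-1], state.schedule[1:]):
#     sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#     model_output = denoiser(sample_hat, sigma_hat)
#     sample, derivative, state = scheduler.step(
#         state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=False)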
| 75 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ['vqvae']

    def __init__(self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler], ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self):
        '''simple docstring'''
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 10_00
@torch.no_grad()
    def __call__(self, batch_size: int = 1, audio_file: str = None, raw_audio: np.ndarray = None, slice: int = 0, start_step: int = 0, steps: int = None, generator: torch.Generator = None, mask_start_secs: float = 0, mask_end_secs: float = 0, step_generator: torch.Generator = None, eta: float = 0, noise: torch.Tensor = None, encoding: torch.Tensor = None, return_dict=True, ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        '''simple docstring'''
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=generator , device=self.device , )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype='''uint8''').reshape(
                (input_image.height, input_image.width))
            input_image = (input_image / 2_55) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator)[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)['''sample''']
            else:
                model_output = self.unet(images, t)['''sample''']
            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , eta=eta , generator=step_generator , )['''prev_sample''']
            else:
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , generator=step_generator , )['''prev_sample''']
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)['''sample''']
        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 2_55).round().astype('''uint8''')
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode='''RGB''').convert('''L''') for _ in images))
        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]) , **ImagePipelineOutput(images))
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        '''simple docstring'''
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 2_55) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)['''sample''']
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
@staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        '''simple docstring'''
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
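# Usage sketch (added; the model id is an assumed example, not guaranteed by
# this file):
# pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256").to("cuda")
# output = pipe(generator=torch.Generator(device="cuda").manual_seed(42))
# image, audio = output.images[0], output.audios[0]  # spectrogram + waveform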
| 19 | 0 |
"""simple docstring"""
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
a_ = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
                } ) , )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)
        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError('''Expected `X` to be a 2D vector''')
        if len(reference_distribution.shape) != 2:
            raise ValueError('''Expected `reference_distribution` to be a 2D vector''')
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''')
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 76 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
_a = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = 'detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=1_00 , encoder_layers=6 , encoder_ffn_dim=20_48 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=20_48 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('''model_type''')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        '''simple docstring'''
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
    def atol_for_validation(self) -> float:
'''simple docstring'''
return 1e-5
@property
    def default_onnx_opset(self) -> int:
'''simple docstring'''
return 12
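# Illustrative instantiation (added): the attribute_map above makes the
# generic config names resolve to DETR's own hyperparameters.
# config = DetrConfig()
# assert config.hidden_size == config.d_model == 256
# assert config.num_attention_heads == config.encoder_attention_heads == 8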
| 19 | 0 |
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    """simple docstring"""
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """simple docstring"""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """simple docstring"""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """simple docstring"""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    node_count = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
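# Worked example (added): for 3 nodes, binomial_coefficient(6, 3) == 20, so
# catalan_number(3) == 20 // 4 == 5 binary search trees, and
# binary_tree_count(3) == 5 * 3! == 30 distinct binary trees.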
| 77 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = 'wavlm'

    def __init__( self , vocab_size=32 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_28 , num_conv_pos_embedding_groups=16 , num_buckets=3_20 , max_bucket_distance=8_00 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , num_codevectors_per_group=3_20 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_00 , codevector_dim=2_56 , proj_codevector_dim=2_56 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_56 , tdnn_dim=(5_12, 5_12, 5_12, 5_12, 15_00) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=5_12 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        '''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1)
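# Illustrative check (added): with the default conv_stride
# (5, 2, 2, 2, 2, 2, 2), the feature encoder downsamples raw audio by
# 5 * 2**6 == 320, which is exactly what inputs_to_logits_ratio reports.
# config = WavLMConfig()
# assert config.inputs_to_logits_ratio == 320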
| 19 | 0 |
'''simple docstring'''
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    '''simple docstring'''
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 7_01
a = 10_00_00_00_00
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
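# Sanity check (added): the recursive implementation agrees with Python's
# built-in three-argument pow, which performs the same modular exponentiation.
assert binary_exponentiation(10, 699, 701) == pow(10, 699, 701)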
| 78 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
_a = """bart"""
_a = True
@st.cache(allow_output_mutation=True)
def load_models():
    """simple docstring"""
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
        qar_model = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
        save_dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
        sas_model.load_state_dict(save_dict['''model'''] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name='''t5-small''', from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''', device='''cuda:0''' )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    """simple docstring"""
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='''wiki_snippets''', name='''wiki40b_en_100_0''' )['''train''']
        wiki40b_passage_reps = np.memmap(
            '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''', dtype='''float32''', mode='''r''', shape=(wiki40b_passages.num_rows, 1_28), )
        wiki40b_index_flat = faiss.IndexFlatIP(1_28 )
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat )
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps )  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    """simple docstring"""
    eli5 = datasets.load_dataset('''eli5''', name='''LFQA_reddit''' )
    eli5_train = eli5['''train_eli5''']
    eli5_train_q_reps = np.memmap(
        '''eli5_questions_reps.dat''', dtype='''float32''', mode='''r''', shape=(eli5_train.num_rows, 1_28) )
    eli5_train_q_index = faiss.IndexFlatIP(1_28 )
    eli5_train_q_index.add(eli5_train_q_reps )
    return (eli5_train, eli5_train_q_index)
wiki40b_passages , wiki40b_gpu_index_flat , es_client = load_indexes()
qar_tokenizer , qar_model , sas_tokenizer , sas_model = load_models()
eli5_train , eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    """simple docstring"""
    # the argument order for embed_questions_for_retrieval is assumed from the
    # eli5 utilities: (questions, tokenizer, embedder)
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model )
    D, I = eli5_train_q_index.search(q_rep, n_results )
    nn_examples = [eli5_train[int(i )] for i in I[0]]
    return nn_examples
def lowerCamelCase__ ( __snake_case, __snake_case="wiki40b", __snake_case="dense", __snake_case=10 ) -> List[str]:
"""simple docstring"""
if source == "none":
_UpperCamelCase , _UpperCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase , _UpperCamelCase = query_qa_dense_index(
__snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
else:
_UpperCamelCase , _UpperCamelCase = query_es_index(
__snake_case, __snake_case, index_name='''english_wiki40b_snippets_100w''', n_results=__snake_case, )
_UpperCamelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase = '''question: {} context: {}'''.format(__snake_case, __snake_case )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    } )
def answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    """simple docstring"""
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device='''cuda:0''', )[0]
    # support_list is the module-level variable set in the main block below
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_a = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_a = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_a = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_a = st.sidebar.checkbox("""Demo options""")
if demo_options:
    action_st = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
    show_passages = show_type == """Show full text of passages"""
else:
    action = 3
    show_passages = True
_a = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_a = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_a = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_a = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_a = """wiki40b"""
_a = """dense"""
_a = """beam"""
_a = 2
_a = 64
_a = 256
_a = None
_a = None
_a = st.sidebar.checkbox("""Generation options""")
if generate_options:
_a = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_a = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
    min_len = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
    max_len = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_a = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
        top_p = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
        temp = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
        n_beams = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_a = st.text_input("""Enter your question here:""", """""")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_a , _a = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_a , _a = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_a = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_a = support_list[:10]
_a = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_a , _a = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_a = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_a = res[1].strip()
if sec_titles == "":
_a = """[{}]({})""".format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(""" & """)
                sections = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
        answers_st = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_a = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
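# Run note (added): this is a Streamlit script, so it is meant to be launched
# with something like `streamlit run eli5_app.py` on a CUDA machine where the
# precomputed .dat index files and seq2seq checkpoints referenced above exist;
# importing it as a module would execute the whole UI immediately.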
| 19 | 0 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
SCREAMING_SNAKE_CASE__ : List[Any] = ["""small""", """medium""", """large"""]
SCREAMING_SNAKE_CASE__ : Optional[int] = """lm_head.decoder.weight"""
SCREAMING_SNAKE_CASE__ : Tuple = """lm_head.weight"""
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> int:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = torch.load(__lowerCamelCase )
UpperCAmelCase__ : List[Any] = d.pop(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
SCREAMING_SNAKE_CASE__ : int = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
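# Usage sketch (added; the script name is illustrative): run from a directory
# containing the fine-tuned DialoGPT pickles small_ft.pkl / medium_ft.pkl /
# large_ft.pkl, e.g.
#   python convert_dialogpt_checkpoint.py --dialogpt_path .
# Each ./DialoGPT-<size> folder then holds a state dict with the tied lm_head
# key renamed so it can be loaded into a GPT-2-style language model.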
| 79 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
_a = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
_a = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
            load_adapter(name, value, adapter, unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def load_adapter(full_name, value, adapter, unused_weights):
    """Copy one fairseq adapter / projection tensor into the HF adapter module."""
    name = full_name.split('''adaptor.''' )[-1]
    items = name.split('''.''' )
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
                adapter.proj_layer_norm.bias.data = value
                logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
                adapter.proj.bias.data = value
                logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
                adapter.proj.weight.data = value
                logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
    elif isinstance(layer_id, int ):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb(emb):
    """Create a bias-free linear LM head that shares its weight matrix with the given embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
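# A minimal sketch of what `make_linear_from_emb` produces (shapes are placeholders; it assumes
# `torch.nn` is imported as `nn`, as elsewhere in this script). The head shares storage with the
# embedding, so updating one updates the other:
#
#   emb = nn.Embedding(1000, 16)
#   lm_head = make_linear_from_emb(emb)
#   assert lm_head.weight.data_ptr() == emb.weight.data_ptr()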
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim, ):
    """Convert a fairseq wav2vec2 + mBART checkpoint into a HF SpeechEncoderDecoderModel and save it."""
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path )
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            '''config_yaml''': config_yaml_path,
            '''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
            '''w2v_path''': checkpoint_path,
            '''load_pretrained_decoder_from''': None,
        }, )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    recursively_load_weights_wavaveca(model.encoder, hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False )
    logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
    logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config['''pad_token_id'''] = tokenizer.pad_token_id
    config['''bos_token_id'''] = tokenizer.bos_token_id
    config['''eos_token_id'''] = tokenizer.eos_token_id
    config['''tokenizer_class'''] = '''mbart50'''
    config['''feature_extractor_type'''] = '''wav2vec2'''
    config['''decoder_start_token_id'''] = tokenizer.eos_token_id
    config['''forced_bos_token_id'''] = 25_00_04
    config['''forced_eos_token_id'''] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=25_0004, type=int, help="""`decoder_start_token_id` of model config""")
_a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
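# A hedged example invocation (the script file name and all paths are placeholders; the encoder
# and decoder config defaults above already point at facebook/wav2vec2-xls-r-1b and
# facebook/mbart-large-50-one-to-many-mmt):
#
#   python convert_mbart_wav2vec2_checkpoint.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.mbart50.txt \
#       --config_yaml_path ./config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50-converted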
| 19 | 0 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__UpperCamelCase : Optional[int] = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
__UpperCamelCase : str = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = get_test_to_tester_mapping(_lowerCAmelCase )
__lowercase = get_test_to_tester_mapping(_lowerCAmelCase )
__lowercase = {"""BertModelTest""": """BertModelTester"""}
__lowercase = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = get_model_to_test_mapping(_lowerCAmelCase )
__lowercase = get_model_to_test_mapping(_lowerCAmelCase )
__lowercase = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
__lowercase = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
def _a ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = get_model_to_tester_mapping(_lowerCAmelCase )
__lowercase = get_model_to_tester_mapping(_lowerCAmelCase )
__lowercase = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
__lowercase = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
| 80 |
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Compute ROUGE between line-aligned prediction and reference files; kwargs go to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs )
    if save_path is not None:
        save_json(metrics, save_path, indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
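# Example CLI usage via `fire` (file names are placeholders): positional arguments map to
# `pred_path` and `tgt_path`, and flags map to keyword arguments of `calculate_rouge_path`.
#
#   python rouge_cli.py predictions.txt references.txt --save_path=metrics.json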
| 19 | 0 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = (CMStochasticIterativeScheduler,)
__UpperCAmelCase : List[Any] = 10
def __snake_case ( self : Any , **lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
__snake_case : Union[str, Any] = {
"num_train_timesteps": 201,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
config.update(**lowerCamelCase )
return config
def __snake_case ( self : Optional[int] ) -> List[str]:
__snake_case : Optional[int] = 10
__snake_case : Any = self.get_scheduler_config()
__snake_case : List[str] = self.scheduler_classes[0](**lowerCamelCase )
scheduler.set_timesteps(lowerCamelCase )
__snake_case : List[Any] = scheduler.timesteps[0]
__snake_case : List[Any] = scheduler.timesteps[1]
__snake_case : int = self.dummy_sample
__snake_case : Optional[int] = 0.1 * sample
__snake_case : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
__snake_case : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __snake_case ( self : Dict ) -> str:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def __snake_case ( self : Optional[int] ) -> Dict:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowerCamelCase )
def __snake_case ( self : Dict ) -> Tuple:
__snake_case : Tuple = self.scheduler_classes[0]
__snake_case : Union[str, Any] = self.get_scheduler_config()
__snake_case : Any = scheduler_class(**lowerCamelCase )
__snake_case : Union[str, Any] = 1
scheduler.set_timesteps(lowerCamelCase )
__snake_case : Union[str, Any] = scheduler.timesteps
__snake_case : Optional[Any] = torch.manual_seed(0 )
__snake_case : Tuple = self.dummy_model()
__snake_case : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowerCamelCase ):
# 1. scale model input
__snake_case : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# 2. predict noise residual
__snake_case : Union[str, Any] = model(lowerCamelCase , lowerCamelCase )
# 3. predict previous sample x_t-1
__snake_case : Any = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ).prev_sample
__snake_case : Tuple = pred_prev_sample
__snake_case : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase ) )
__snake_case : List[str] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
assert abs(result_mean.item() - 0.25_10 ) < 1E-3
def __snake_case ( self : str ) -> Optional[Any]:
__snake_case : Dict = self.scheduler_classes[0]
__snake_case : List[Any] = self.get_scheduler_config()
__snake_case : Union[str, Any] = scheduler_class(**lowerCamelCase )
__snake_case : Tuple = [106, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase )
__snake_case : Union[str, Any] = scheduler.timesteps
__snake_case : List[str] = torch.manual_seed(0 )
__snake_case : Any = self.dummy_model()
__snake_case : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
__snake_case : Tuple = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# 2. predict noise residual
__snake_case : List[str] = model(lowerCamelCase , lowerCamelCase )
# 3. predict previous sample x_t-1
__snake_case : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ).prev_sample
__snake_case : List[Any] = pred_prev_sample
__snake_case : Any = torch.sum(torch.abs(lowerCamelCase ) )
__snake_case : Optional[int] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
assert abs(result_mean.item() - 0.45_27 ) < 1E-3
def __snake_case ( self : List[str] ) -> List[Any]:
__snake_case : Optional[int] = self.scheduler_classes[0]
__snake_case : str = self.get_scheduler_config()
__snake_case : int = scheduler_class(**lowerCamelCase )
__snake_case : Any = [39, 30, 12, 15, 0]
with self.assertRaises(lowerCamelCase , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=lowerCamelCase )
def __snake_case ( self : Tuple ) -> Union[str, Any]:
__snake_case : Optional[int] = self.scheduler_classes[0]
__snake_case : int = self.get_scheduler_config()
__snake_case : Any = scheduler_class(**lowerCamelCase )
__snake_case : Tuple = [39, 30, 12, 1, 0]
__snake_case : Union[str, Any] = len(lowerCamelCase )
with self.assertRaises(lowerCamelCase , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase , timesteps=lowerCamelCase )
def __snake_case ( self : List[Any] ) -> Optional[int]:
__snake_case : str = self.scheduler_classes[0]
__snake_case : Dict = self.get_scheduler_config()
__snake_case : Optional[int] = scheduler_class(**lowerCamelCase )
__snake_case : int = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCamelCase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=lowerCamelCase )
| 81 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['image_processor', 'tokenizer']
lowercase__ = 'ViTImageProcessor'
lowercase__ = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __a=None , __a=None , **__a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __a , )
_UpperCamelCase = kwargs.pop('''feature_extractor''')
_UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''')
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''')
super().__init__(__a , __a)
def __call__( self , __a=None , __a=None , __a=None , __a=None , **__a) -> Tuple:
'''simple docstring'''
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''')
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''')
if text is not None:
_UpperCamelCase = self.tokenizer(__a , return_tensors=__a , **__a)
if visual_prompt is not None:
_UpperCamelCase = self.image_processor(__a , return_tensors=__a , **__a)
if images is not None:
_UpperCamelCase = self.image_processor(__a , return_tensors=__a , **__a)
if visual_prompt is not None and images is not None:
_UpperCamelCase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
_UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
_UpperCamelCase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**__a) , tensor_type=__a)
def UpperCAmelCase ( self , *__a , **__a) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*__a , **__a)
def UpperCAmelCase ( self , *__a , **__a) -> List[str]:
'''simple docstring'''
return self.tokenizer.decode(*__a , **__a)
@property
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __a , )
return self.image_processor_class
@property
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __a , )
return self.image_processor
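# A hedged usage sketch: the class name above is anonymized, but its structure matches a
# CLIPSeg-style processor, so assume it is exported under a real name such as `CLIPSegProcessor`:
#
#   from PIL import Image
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=Image.open("scene.jpg"), return_tensors="pt")
#   # -> BatchEncoding with `input_ids`, `attention_mask` and `pixel_values`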
| 19 | 0 |
"""simple docstring"""
import os
def a__ ( ):
with open(os.path.dirname(lowerCAmelCase__ ) + "/p022_names.txt" ) as file:
UpperCAmelCase_ = str(file.readlines()[0] )
UpperCAmelCase_ = names.replace("\"" , "" ).split("," )
names.sort()
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
for i, name in enumerate(lowerCAmelCase__ ):
for letter in name:
name_score += ord(lowerCAmelCase__ ) - 64
total_score += (i + 1) * name_score
UpperCAmelCase_ = 0
return total_score
if __name__ == "__main__":
print(solution())
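# Worked example from the problem statement: COLIN has letter value 3 + 15 + 12 + 9 + 14 = 53
# and is the 938th name in the sorted list, so it contributes 938 * 53 = 49714 to the total.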
| 82 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=32 , __a=3 , __a=4 , __a=[10, 20, 30, 40] , __a=[2, 2, 3, 2] , __a=True , __a=True , __a=37 , __a="gelu" , __a=10 , __a=0.02 , __a=["stage2", "stage3", "stage4"] , __a=3 , __a=None , ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = num_channels
_UpperCamelCase = num_stages
_UpperCamelCase = hidden_sizes
_UpperCamelCase = depths
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = out_features
_UpperCamelCase = num_labels
_UpperCamelCase = scope
_UpperCamelCase = num_stages
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__a , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=__a , loss_ignore_index=2_55 , num_labels=self.num_labels , )
def UpperCAmelCase ( self , __a , __a , __a) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = UperNetForSemanticSegmentation(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size))
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
        (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__ = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = UperNetModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a)
@unittest.skip(reason='''UperNet does not use inputs_embeds''')
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''')
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not have a base model''')
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not have a base model''')
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
def check_hidden_states_output(__a , __a , __a):
_UpperCamelCase = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_UpperCamelCase = model(**self._prepare_for_class(__a , __a))
_UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(__a) , expected_num_stages + 1)
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = True
check_hidden_states_output(__a , __a , __a)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase = True
check_hidden_states_output(__a , __a , __a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = _config_zero_init(__a)
_UpperCamelCase = _config_zero_init(configs_no_init.backbone_config)
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(config=__a)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason='''UperNet does not have tied weights''')
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
pass
@slow
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = UperNetForSemanticSegmentation.from_pretrained(__a)
self.assertIsNotNone(__a)
def lowerCamelCase__ ( ) -> int:
"""simple docstring"""
_UpperCamelCase = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''', repo_type='''dataset''', filename='''ADE_val_00000001.jpg''' )
_UpperCamelCase = Image.open(__snake_case ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class _UpperCAmelCase( unittest.TestCase ):
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''')
_UpperCamelCase = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''').to(__a)
_UpperCamelCase = prepare_img()
_UpperCamelCase = processor(images=__a , return_tensors='''pt''').to(__a)
with torch.no_grad():
_UpperCamelCase = model(**__a)
_UpperCamelCase = torch.Size((1, model.config.num_labels, 5_12, 5_12))
self.assertEqual(outputs.logits.shape , __a)
_UpperCamelCase = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __a , atol=1e-4))
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''')
_UpperCamelCase = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''').to(__a)
_UpperCamelCase = prepare_img()
_UpperCamelCase = processor(images=__a , return_tensors='''pt''').to(__a)
with torch.no_grad():
_UpperCamelCase = model(**__a)
_UpperCamelCase = torch.Size((1, model.config.num_labels, 5_12, 5_12))
self.assertEqual(outputs.logits.shape , __a)
_UpperCamelCase = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __a , atol=1e-4))
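# A minimal inference sketch mirroring the slow tests above (the checkpoint name is taken from
# them; the rest is a hedged outline, not part of the test suite):
#
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits      # (1, num_labels, 512, 512)
#   seg_map = logits.argmax(dim=1)           # per-pixel class indices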
| 19 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
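# Effect of the `_LazyModule` registration above (a hedged sketch): submodules are imported only
# when their attributes are first accessed, so a config-only import stays cheap while a model
# import pulls in torch.
#
#   from transformers.models.squeezebert import SqueezeBertConfig  # loads configuration only
#   from transformers.models.squeezebert import SqueezeBertModel   # triggers the torch-backed module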
| 83 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = (DDPMScheduler,)
def UpperCAmelCase ( self , **__a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__a)
return config
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=__a , beta_end=__a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.check_over_configs(thresholding=__a)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__a , prediction_type=__a , sample_max_value=__a , )
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=__a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.0_0979)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.02)) < 1e-5
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = len(__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter
_UpperCamelCase = torch.manual_seed(0)
for t in reversed(range(__a)):
# 1. predict noise residual
_UpperCamelCase = model(__a , __a)
# 2. predict previous mean of sample x_t-1
_UpperCamelCase = scheduler.step(__a , __a , __a , generator=__a).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_UpperCamelCase = pred_prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
assert abs(result_sum.item() - 258.9606) < 1e-2
assert abs(result_mean.item() - 0.3372) < 1e-3
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''')
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = len(__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter
_UpperCamelCase = torch.manual_seed(0)
for t in reversed(range(__a)):
# 1. predict noise residual
_UpperCamelCase = model(__a , __a)
# 2. predict previous mean of sample x_t-1
_UpperCamelCase = scheduler.step(__a , __a , __a , generator=__a).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_UpperCamelCase = pred_prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
assert abs(result_sum.item() - 202.0296) < 1e-2
assert abs(result_mean.item() - 0.2631) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__a)
_UpperCamelCase = scheduler.timesteps
for i, timestep in enumerate(__a):
if i == len(__a) - 1:
_UpperCamelCase = -1
else:
_UpperCamelCase = timesteps[i + 1]
_UpperCamelCase = scheduler.previous_timestep(__a)
_UpperCamelCase = prev_t.item()
self.assertEqual(__a , __a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = [1_00, 87, 50, 51, 0]
with self.assertRaises(__a , msg='''`custom_timesteps` must be in descending order.'''):
scheduler.set_timesteps(timesteps=__a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = [1_00, 87, 50, 1, 0]
_UpperCamelCase = len(__a)
with self.assertRaises(__a , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
scheduler.set_timesteps(num_inference_steps=__a , timesteps=__a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __a , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=__a)
| 19 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : List[Any] = """conditional_detr"""
_UpperCamelCase : Any = ["""past_key_values"""]
_UpperCamelCase : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , snake_case=True , snake_case=None , snake_case=3 , snake_case=300 , snake_case=6 , snake_case=2048 , snake_case=8 , snake_case=6 , snake_case=2048 , snake_case=8 , snake_case=0.0 , snake_case=0.0 , snake_case=True , snake_case="relu" , snake_case=256 , snake_case=0.1 , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1.0 , snake_case=False , snake_case="sine" , snake_case="resnet50" , snake_case=True , snake_case=False , snake_case=2 , snake_case=5 , snake_case=2 , snake_case=1 , snake_case=1 , snake_case=2 , snake_case=5 , snake_case=2 , snake_case=0.25 , **snake_case , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowercase = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(snake_case , snake_case ):
lowercase = backbone_config.get('model_type' )
lowercase = CONFIG_MAPPING[backbone_model_type]
lowercase = config_class.from_dict(snake_case )
lowercase = use_timm_backbone
lowercase = backbone_config
lowercase = num_channels
lowercase = num_queries
lowercase = d_model
lowercase = encoder_ffn_dim
lowercase = encoder_layers
lowercase = encoder_attention_heads
lowercase = decoder_ffn_dim
lowercase = decoder_layers
lowercase = decoder_attention_heads
lowercase = dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = activation_function
lowercase = init_std
lowercase = init_xavier_std
lowercase = encoder_layerdrop
lowercase = decoder_layerdrop
lowercase = encoder_layers
lowercase = auxiliary_loss
lowercase = position_embedding_type
lowercase = backbone
lowercase = use_pretrained_backbone
lowercase = dilation
# Hungarian matcher
lowercase = class_cost
lowercase = bbox_cost
lowercase = giou_cost
# Loss coefficients
lowercase = mask_loss_coefficient
lowercase = dice_loss_coefficient
lowercase = cls_loss_coefficient
lowercase = bbox_loss_coefficient
lowercase = giou_loss_coefficient
lowercase = focal_alpha
super().__init__(is_encoder_decoder=snake_case , **snake_case )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return self.d_model
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowercase = self.backbone_config.to_dict()
lowercase = self.__class__.model_type
return output
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : List[str] = version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 1E-5
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 12
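# A hedged usage sketch: the config class above is anonymized, but its `model_type` identifies it
# as transformers' `ConditionalDetrConfig`, so with that name:
#
#   config = ConditionalDetrConfig()       # defaults: ResNet-50 backbone, 300 queries, d_model=256
#   config_dict = config.to_dict()         # includes the nested backbone config, per `to_dict` above
#   assert config_dict["model_type"] == "conditional_detr"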
| 84 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
_a = 100
_a = set(range(3, NUM_PRIMES, 2))
primes.add(2)
_a = 42
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def lowerCamelCase__ ( __snake_case ) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
_UpperCamelCase = set()
_UpperCamelCase = 42
_UpperCamelCase = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def lowerCamelCase__ ( __snake_case = 50_00 ) -> int | None:
"""simple docstring"""
for number_to_partition in range(1, __snake_case ):
if len(partition(__snake_case ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 19 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
SCREAMING_SNAKE_CASE__ : List[Any] = get_logger()
SCREAMING_SNAKE_CASE__ : Optional[dict] = None
class snake_case ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self : Any , a_ : Dict=None , a_ : Dict=None , **a_ : Optional[int] )-> Tuple:
"""simple docstring"""
super().__init__(features=a_ )
import jax
from jaxlib.xla_client import Device
        if isinstance(a_ , Device ):
raise ValueError(
F'''Expected {device} to be a `str` not {type(a_ )}, as `jaxlib.xla_extension.Device` '''
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = device if isinstance(a_ , str ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE__ : List[Any] = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'''Device with string identifier {self.device} not listed among the available '''
F'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
F'''device: {str(jax.devices()[0] )}.''' )
SCREAMING_SNAKE_CASE__ : Tuple = str(jax.devices()[0] )
SCREAMING_SNAKE_CASE__ : List[Any] = jnp_array_kwargs
@staticmethod
def __lowercase( )-> Dict[str, "jaxlib.xla_extension.Device"]:
"""simple docstring"""
import jax
return {str(a_ ): device for device in jax.devices()}
def __lowercase( self : str , a_ : Dict )-> List[str]:
"""simple docstring"""
import jax
import jax.numpy as jnp
        if isinstance(a_ , list ) and column:
if all(
isinstance(a_ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(a_ , axis=0 )
return column
def __lowercase( self : int , a_ : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
import jax
import jax.numpy as jnp
        if isinstance(a_ , (str, bytes, type(None)) ):
return value
elif isinstance(a_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
if isinstance(a_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                SCREAMING_SNAKE_CASE__ : str = {'dtype': jnp.int64}
            else:
                SCREAMING_SNAKE_CASE__ : List[Any] = {'dtype': jnp.int32}
        elif isinstance(a_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            SCREAMING_SNAKE_CASE__ : Optional[int] = {'dtype': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a_ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(a_ )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(a_ , **{**default_dtype, **self.jnp_array_kwargs} )
def __lowercase( self : Optional[Any] , a_ : Any )-> Union[str, Any]:
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(a_ , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(a_ , '__array__' ) and not isinstance(a_ , jax.Array ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(a_ , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(a_ , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(a_ )
def __lowercase( self : Union[str, Any] , a_ : dict )-> List[str]:
"""simple docstring"""
return map_nested(self._recursive_tensorize , a_ , map_list=a_ )
def __lowercase( self : Dict , a_ : pa.Table )-> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.numpy_arrow_extractor().extract_row(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = self.python_features_decoder.decode_row(a_ )
return self.recursive_tensorize(a_ )
def __lowercase( self : Any , a_ : pa.Table )-> "jax.Array":
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.numpy_arrow_extractor().extract_column(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = self.python_features_decoder.decode_column(a_ , pa_table.column_names[0] )
SCREAMING_SNAKE_CASE__ : Dict = self.recursive_tensorize(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = self._consolidate(a_ )
return column
def __lowercase( self : Optional[int] , a_ : pa.Table )-> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.numpy_arrow_extractor().extract_batch(a_ )
SCREAMING_SNAKE_CASE__ : str = self.python_features_decoder.decode_batch(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = self.recursive_tensorize(a_ )
for column_name in batch:
SCREAMING_SNAKE_CASE__ : Tuple = self._consolidate(batch[column_name] )
return batch
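# A hedged usage sketch through the public `datasets` API (this formatter is what the "jax"
# format dispatches to):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#   ds[0]["x"]   # -> a jax.Array placed on the default (or requested) device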
| 85 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> np.array:
"""simple docstring"""
_UpperCamelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCamelCase = np.zeros((n + 1,) )
_UpperCamelCase = ya
_UpperCamelCase = xa
for k in range(__snake_case ):
_UpperCamelCase = y[k] + step_size * ode_func(__snake_case, y[k] )
_UpperCamelCase = y[k] + (
(step_size / 2) * (ode_func(__snake_case, y[k] ) + ode_func(x + step_size, __snake_case ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
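# Quick check on y' = y with y(0) = 1, whose exact solution is e**x; Heun's method is second
# order, so with step 0.1 the endpoint value is already close to e:
#
#   ys = heun(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
#   print(ys[-1])   # ~2.7141, versus e ~ 2.7183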
| 19 | 0 |
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of the squares of the first n naturals."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
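# Worked example for n = 10: the sum of squares is 385 and the square of the sum is 55**2 = 3025,
# so solution(10) == 3025 - 385 == 2640.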
if __name__ == "__main__":
    print(F"{solution() = }")
| 86 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_a = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_a = parser.parse_args()
if args.model_type == "bert":
_a = BertForMaskedLM.from_pretrained(args.model_name)
_a = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_a = model.state_dict()
_a = {}
for w in ["word_embeddings", "position_embeddings"]:
_a = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_a = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
_a = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_a = state_dict["""cls.predictions.decoder.weight"""]
_a = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_a = state_dict[F"""cls.predictions.transform.dense.{w}"""]
_a = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
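# Example invocation (the script file name is a placeholder; the checkpoint path reuses the
# default defined above):
#
#   python extract_distilbert.py --model_type bert --model_name bert-base-uncased \
#       --dump_checkpoint serialization_dir/tf_bert-base-uncased_0247911.pth --vocab_transform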
| 19 | 0 |
_lowerCamelCase : Union[str, Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowerCamelCase : Dict = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowerCamelCase : Dict = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 87 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_a = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class _UpperCAmelCase:
lowercase__ = PegasusConfig
lowercase__ = {}
lowercase__ = 'gelu'
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ) -> int:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = eos_token_id
_UpperCamelCase = pad_token_id
_UpperCamelCase = bos_token_id
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size)
_UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1)
_UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCamelCase = prepare_pegasus_inputs_dict(__a , __a , __a)
return config, inputs_dict
def UpperCAmelCase ( self , __a , __a , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(__a)
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''])
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __a , __a)
_UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''')
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''')
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , __a , decoder_attention_mask=__a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__a , )
_UpperCamelCase = model.decode(__a , __a)
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''')
def UpperCAmelCase ( self , __a , __a , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(__a)
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''])
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __a , __a)
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''')
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , __a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__a , decoder_position_ids=__a , )
_UpperCamelCase = model.decode(__a , __a , decoder_attention_mask=__a)
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''')
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, ):
    """Build attention masks from the pad token id when they are not provided."""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ).astype(np.int8 ),
            ], axis=-1, )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase ):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = FlaxPegasusModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__a , __a , __a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__a , __a , __a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_UpperCamelCase = self._prepare_for_class(__a , __a)
_UpperCamelCase = model_class(__a)
@jax.jit
def encode_jitted(__a , __a=None , **__a):
return model.encode(input_ids=__a , attention_mask=__a)
with self.subTest('''JIT Enabled'''):
_UpperCamelCase = encode_jitted(**__a).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
_UpperCamelCase = encode_jitted(**__a).to_tuple()
self.assertEqual(len(__a) , len(__a))
for jitted_output, output in zip(__a , __a):
self.assertEqual(jitted_output.shape , output.shape)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''])
_UpperCamelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(__a , __a , __a):
return model.decode(
decoder_input_ids=__a , decoder_attention_mask=__a , encoder_outputs=__a , )
with self.subTest('''JIT Enabled'''):
_UpperCamelCase = decode_jitted(**__a).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
_UpperCamelCase = decode_jitted(**__a).to_tuple()
self.assertEqual(len(__a) , len(__a))
for jitted_output, output in zip(__a , __a):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=__a)
_UpperCamelCase = np.ones((1, 1))
_UpperCamelCase = model(__a)
self.assertIsNotNone(__a)
@slow
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''')
_UpperCamelCase = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''')
_UpperCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
_UpperCamelCase = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
_UpperCamelCase = tokenizer(__a , return_tensors='''np''' , truncation=__a , max_length=5_12 , padding=__a)
_UpperCamelCase = model.generate(**__a , num_beams=2).sequences
_UpperCamelCase = tokenizer.batch_decode(__a , skip_special_tokens=__a)
assert tgt_text == decoded
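# --- illustrative aside (not part of the original test file) ---
# The cache checks above build decoder position ids with jnp.broadcast_to.
# A minimal, self-contained sketch of that construction; the batch and
# sequence sizes here are made up for illustration:
import jax.numpy as jnp

batch_size, seq_len = 2, 5
position_ids = jnp.broadcast_to(jnp.arange(seq_len - 1)[None, :], (batch_size, seq_len - 1))
print(position_ids.shape)  # (2, 4): positions 0..3 replicated for every batch row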
| 19 | 0 |
"""simple docstring"""
def factorial(num: int) -> int:
    """Return num! (the product 1 * 2 * ... * num)."""
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact
def split_and_add(number: int) -> int:
    """Split number into digits and add the digits together."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    fact = factorial(num )
    result = split_and_add(fact )
    return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
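# --- illustrative aside (not part of the original snippet) ---
# Quick sanity check of the helpers above on a small known value:
# 10! = 3628800 and its digit sum is 3+6+2+8+8+0+0 = 27.
assert factorial(10) == 3628800
assert split_and_add(3628800) == 27
assert solution(10) == 27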
| 88 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , __a=0 , ) -> Any:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
_UpperCamelCase = projection_dim
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
_UpperCamelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict())
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = TFDPRContextEncoder(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = TFDPRQuestionEncoder(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = TFDPRReader(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = TFDPRModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__a)
@slow
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRContextEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRContextEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRQuestionEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRReader.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''')
_UpperCamelCase = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]]) # [CLS] hello, is my dog cute? [SEP]
_UpperCamelCase = model(__a)[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_UpperCamelCase = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
])
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4))
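# --- illustrative aside (not part of the original test file) ---
# DPR ranks passages by the dot product between a question embedding and the
# context embeddings. A toy sketch with random vectors; the 768 dimension and
# the 5 passages are assumptions for illustration only:
import numpy as np

rng = np.random.default_rng(0)
question_emb = rng.normal(size=(768,))
context_embs = rng.normal(size=(5, 768))
scores = context_embs @ question_emb      # one relevance score per passage
print(np.argsort(scores)[::-1])           # passage indices, best match first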
| 19 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase ):
    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
    batch_params = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
    required_optional_params = [
        """generator""",
        """height""",
        """width""",
        """strength""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False
@property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.time_input_dim
@property
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
return 1_00
@property
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Tuple = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowercase : Tuple = UNetaDConditionModel(**lowerCamelCase)
return model
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : List[Any] = VQModel(**self.dummy_movq_kwargs)
return model
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[str] = self.dummy_unet
_lowercase : List[Any] = self.dummy_movq
_lowercase : Union[str, Any] = {
'num_train_timesteps': 10_00,
'beta_schedule': 'linear',
'beta_start': 0.0_0_0_8_5,
'beta_end': 0.0_1_2,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
_lowercase : Any = DDIMScheduler(**lowerCamelCase)
_lowercase : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Optional[Any]:
"""simple docstring"""
_lowercase : Dict = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
_lowercase : Any = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
lowerCamelCase)
# create init_image
_lowercase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
_lowercase : str = image.cpu().permute(0, 2, 3, 1)[0]
_lowercase : Optional[int] = Image.fromarray(np.uinta(lowerCamelCase)).convert('RGB').resize((2_56, 2_56))
# create hint
_lowercase : int = floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
if str(lowerCamelCase).startswith('mps'):
_lowercase : str = torch.manual_seed(lowerCamelCase)
else:
_lowercase : Any = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : Any = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Dict = 'cpu'
_lowercase : Optional[Any] = self.get_dummy_components()
_lowercase : Any = self.pipeline_class(**lowerCamelCase)
_lowercase : str = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = pipe(**self.get_dummy_inputs(lowerCamelCase))
_lowercase : List[str] = output.images
_lowercase : List[Any] = pipe(
**self.get_dummy_inputs(lowerCamelCase), return_dict=lowerCamelCase, )[0]
_lowercase : Tuple = image[0, -3:, -3:, -1]
_lowercase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowercase : int = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyVaaControlnetImgaImgPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self) -> None:
        """Release GPU memory between tests."""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy')
_lowercase : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
_lowercase : Dict = init_image.resize((5_12, 5_12))
_lowercase : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png')
_lowercase : List[str] = torch.from_numpy(np.array(lowerCamelCase)).float() / 2_5_5.0
_lowercase : Union[str, Any] = hint.permute(2, 0, 1).unsqueeze(0)
_lowercase : int = 'A robot, 4k photo'
_lowercase : List[Any] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa)
pipe_prior.to(lowerCamelCase)
_lowercase : Dict = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa)
_lowercase : Union[str, Any] = pipeline.to(lowerCamelCase)
pipeline.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = torch.Generator(device='cpu').manual_seed(0)
_lowercase , _lowercase : str = pipe_prior(
lowerCamelCase, image=lowerCamelCase, strength=0.8_5, generator=lowerCamelCase, negative_prompt='', ).to_tuple()
_lowercase : Union[str, Any] = pipeline(
image=lowerCamelCase, image_embeds=lowerCamelCase, negative_image_embeds=lowerCamelCase, hint=lowerCamelCase, generator=lowerCamelCase, num_inference_steps=1_00, height=5_12, width=5_12, strength=0.5, output_type='np', )
_lowercase : Optional[Any] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase)
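# --- illustrative aside (not part of the original test file) ---
# The integration test above converts the hint image from HWC uint8 to an
# NCHW float tensor in [0, 1]. A self-contained sketch of that preprocessing;
# the blank 64x64 image is a stand-in for the real hint:
import numpy as np
import torch
from PIL import Image

hint_img = Image.new('RGB', (64, 64))
hint = torch.from_numpy(np.array(hint_img)).float() / 255.0  # HWC in [0, 1]
hint = hint.permute(2, 0, 1).unsqueeze(0)                    # -> NCHW
print(hint.shape)  # torch.Size([1, 3, 64, 64])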
| 89 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
"""simple docstring"""
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x2_0000 and cp <= 0x2_A6DF) #
or (cp >= 0x2_A700 and cp <= 0x2_B73F) #
or (cp >= 0x2_B740 and cp <= 0x2_B81F) #
or (cp >= 0x2_B820 and cp <= 0x2_CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2_F800 and cp <= 0x2_FA1F) #
): #
return True
return False
def is_chinese(word):
"""simple docstring"""
for char in word:
_UpperCamelCase = ord(__snake_case )
if not _is_chinese_char(__snake_case ):
return 0
return 1
def get_chinese_word(tokens):
"""simple docstring"""
_UpperCamelCase = set()
for token in tokens:
_UpperCamelCase = len(__snake_case ) > 1 and is_chinese(__snake_case )
if chinese_word:
word_set.add(__snake_case )
_UpperCamelCase = list(__snake_case )
return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
_UpperCamelCase = max([len(__snake_case ) for w in chinese_word_set] )
_UpperCamelCase = bert_tokens
_UpperCamelCase , _UpperCamelCase = 0, len(__snake_case )
while start < end:
_UpperCamelCase = True
if is_chinese(bert_word[start] ):
_UpperCamelCase = min(end - start, __snake_case )
for i in range(__snake_case, 1, -1 ):
_UpperCamelCase = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
_UpperCamelCase = '''##''' + bert_word[j]
_UpperCamelCase = start + i
_UpperCamelCase = False
break
if single_word:
start += 1
return bert_word
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
"""simple docstring"""
_UpperCamelCase = []
for i in range(0, len(__snake_case ), 1_00 ):
_UpperCamelCase = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws'''] ).cws
_UpperCamelCase = [get_chinese_word(__snake_case ) for r in res]
ltp_res.extend(__snake_case )
assert len(__snake_case ) == len(__snake_case )
_UpperCamelCase = []
for i in range(0, len(__snake_case ), 1_00 ):
_UpperCamelCase = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=__snake_case, truncation=__snake_case, max_length=5_12 )
bert_res.extend(res['''input_ids'''] )
assert len(__snake_case ) == len(__snake_case )
_UpperCamelCase = []
for input_ids, chinese_word in zip(__snake_case, __snake_case ):
_UpperCamelCase = []
for id in input_ids:
_UpperCamelCase = bert_tokenizer._convert_id_to_token(__snake_case )
input_tokens.append(__snake_case )
_UpperCamelCase = add_sub_symbol(__snake_case, __snake_case )
_UpperCamelCase = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__snake_case ):
if token[:2] == "##":
_UpperCamelCase = token[2:]
# save chinese tokens' pos
if len(__snake_case ) == 1 and _is_chinese_char(ord(__snake_case ) ):
ref_id.append(__snake_case )
ref_ids.append(__snake_case )
assert len(__snake_case ) == len(__snake_case )
return ref_ids
def main(args):
    """Generate whole-word-masking reference ids for a Chinese corpus."""
    with open(args.file_name, '''r''', encoding='''utf-8''' ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer )
    with open(args.save_path, '''w''', encoding='''utf-8''' ) as f:
        data = [json.dumps(ref ) + '''\n''' for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
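# --- illustrative aside (not part of the original script) ---
# A compact, standalone version of the CJK codepoint check used above
# (covering only a few of the ranges, for illustration):
def is_cjk(cp: int) -> bool:
    return (
        (0x4E00 <= cp <= 0x9FFF)
        or (0x3400 <= cp <= 0x4DBF)
        or (0x2_0000 <= cp <= 0x2_A6DF)
        or (0xF900 <= cp <= 0xFAFF)
    )

assert is_cjk(ord('中')) and not is_cjk(ord('a'))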
| 19 | 0 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class a__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase ):
'''simple docstring'''
    def tearDown(self) -> None:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase_ , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = generator.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''cyberpunk 2077'''
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowerCAmelCase__ = '''A painting of a squirrel eating a burger '''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.text_to_image(
prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowerCAmelCase__ = pipe.image_variation(lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 90 |
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APPROX-VERTEX-COVER: repeatedly pick the highest-degree vertex."""
    queue: list[list] = []
    # for each node and its adjacency list add them and the rank of the node to queue
    # using the heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so -1*len(v) is used to build a max one
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
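# --- illustrative aside (not part of the original snippet) ---
# Another quick check on a path graph 0-1-2-3, where the two middle
# vertices cover every edge:
path_graph = {0: [1], 1: [0, 2], 2: [1, 3], 3: [2]}
print(sorted(greedy_min_vertex_cover(path_graph)))  # [1, 2]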
| 19 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    '''
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    '''
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    task_type: Optional[str] = field(
        default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    use_fast: bool = field(default=False , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class DataTrainingArguments:
    '''
    Arguments pertaining to the data we are going to input our model for training and eval.
    '''
    data_dir: str = field(
        metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
    labels: Optional[str] = field(
        default=None , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , )
    max_seq_length: int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
A = import_module('tasks' )
try:
A = getattr(snake_case__ , model_args.task_type )
A = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
F'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , snake_case__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
A = token_classification_task.get_labels(data_args.labels )
A = dict(enumerate(snake_case__ ) )
A = len(snake_case__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=snake_case__ , idalabel=snake_case__ , labelaid={label: i for i, label in enumerate(snake_case__ )} , cache_dir=model_args.cache_dir , )
A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
A = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , )
# Get datasets
A = (
TokenClassificationDataset(
token_classification_task=snake_case__ , data_dir=data_args.data_dir , tokenizer=snake_case__ , labels=snake_case__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
A = (
TokenClassificationDataset(
token_classification_task=snake_case__ , data_dir=data_args.data_dir , tokenizer=snake_case__ , labels=snake_case__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> Tuple[List[int], List[int]]:
A = np.argmax(snake_case__ , axis=2 )
A , A = preds.shape
A = [[] for _ in range(snake_case__ )]
A = [[] for _ in range(snake_case__ )]
for i in range(snake_case__ ):
for j in range(snake_case__ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(snake_case__ : EvalPrediction ) -> Dict:
A , A = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(snake_case__ , snake_case__ ),
"precision": precision_score(snake_case__ , snake_case__ ),
"recall": recall_score(snake_case__ , snake_case__ ),
"f1": fa_score(snake_case__ , snake_case__ ),
}
# Data collator
A = DataCollatorWithPadding(snake_case__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
A = Trainer(
model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , data_collator=snake_case__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
A = trainer.evaluate()
A = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(snake_case__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , snake_case__ , snake_case__ )
writer.write('%s = %s\n' % (key, value) )
results.update(snake_case__ )
# Predict
if training_args.do_predict:
A = TokenClassificationDataset(
token_classification_task=snake_case__ , data_dir=data_args.data_dir , tokenizer=snake_case__ , labels=snake_case__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
A , A , A = trainer.predict(snake_case__ )
A , A = align_predictions(snake_case__ , snake_case__ )
A = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(snake_case__ , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , snake_case__ , snake_case__ )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
A = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(snake_case__ , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(snake_case__ , snake_case__ , snake_case__ )
return results
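# --- illustrative aside (not part of the original script) ---
# Toy check of the argmax + ignore_index filtering done by align_predictions
# above; the shapes and label values are made up for illustration:
import numpy as np

logits = np.zeros((1, 4, 3))
logits[0, :, 1] = 1.0                      # every position predicts label 1
label_ids = np.array([[0, 2, -100, 1]])    # -100 is CrossEntropyLoss().ignore_index
preds = np.argmax(logits, axis=2)
kept = [(int(p), int(l)) for p, l in zip(preds[0], label_ids[0]) if l != -100]
print(kept)  # [(1, 0), (1, 2), (1, 1)] - the ignored position is dropped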
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 91 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule() -> None:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '''__test_patch_submodule_mock__'''
    with patch_submodule(_test_patching, '''os.path.join''', mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin() -> None:
    mock = '''__test_patch_submodule_builtin_mock__'''
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, '''open''', mock ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing() -> None:
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching, '''pandas.read_csv''', mock ):
        pass
def test_patch_submodule_missing_builtin() -> None:
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, '''len''', None ) is None
    with patch_submodule(_test_patching, '''len''', mock ):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop() -> None:
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching, '''open''', mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive() -> None:
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join
    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, '''os.path.join''', mock_join ):
        with patch_submodule(_test_patching, '''os.rename''', mock_rename ):
            with patch_submodule(_test_patching, '''os.path.dirname''', mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, '''os.rename''', mock_rename ):
        with patch_submodule(_test_patching, '''os.path.join''', mock_join ):
            with patch_submodule(_test_patching, '''os.path.dirname''', mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist() -> None:
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching, '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''', mock ):
        pass
    with patch_submodule(_test_patching, '''os.__attribute_that_doesn_exist__''', mock ):
        pass
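# --- illustrative aside (not part of the original test file) ---
# patch_submodule behaves much like the stdlib pattern below: swap an
# attribute for the duration of a context and restore it on exit.
import os
from unittest import mock

with mock.patch.object(os.path, 'join', lambda *a: 'patched'):
    assert os.path.join('a', 'b') == 'patched'
assert os.path.join('a', 'b') != 'patched'  # restored after the context exits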
| 19 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig ):
    model_type = 'trajectory_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
def __init__( self : Optional[Any] , UpperCAmelCase__ : List[str]=100 , UpperCAmelCase__ : Optional[int]=5 , UpperCAmelCase__ : int=1 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Optional[int]=249 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : Union[str, Any]=17 , UpperCAmelCase__ : Tuple=25 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : str=128 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : str=0.00_06 , UpperCAmelCase__ : Optional[int]=512 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : List[Any]=1E-12 , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[Any]=1 , UpperCAmelCase__ : Any=50256 , UpperCAmelCase__ : Any=50256 , **UpperCAmelCase__ : Union[str, Any] , ):
'''simple docstring'''
lowercase : str =vocab_size
lowercase : Tuple =action_weight
lowercase : Dict =reward_weight
lowercase : str =value_weight
lowercase : Tuple =max_position_embeddings
lowercase : Any =block_size
lowercase : Dict =action_dim
lowercase : List[str] =observation_dim
lowercase : List[Any] =transition_dim
lowercase : Any =learning_rate
lowercase : Any =n_layer
lowercase : Any =n_head
lowercase : int =n_embd
lowercase : Optional[Any] =embd_pdrop
lowercase : Tuple =attn_pdrop
lowercase : Union[str, Any] =resid_pdrop
lowercase : Optional[int] =initializer_range
lowercase : Union[str, Any] =layer_norm_eps
lowercase : Dict =kaiming_initializer_range
lowercase : Union[str, Any] =use_cache
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
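# --- illustrative aside (not part of the original config file) ---
# The attribute_map above lets generic names resolve to model-specific ones
# (e.g. hidden_size -> n_embd). A minimal sketch of that indirection:
class Cfg:
    attribute_map = {'hidden_size': 'n_embd'}

    def __init__(self, n_embd=128):
        self.n_embd = n_embd

    def __getattr__(self, name):
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

assert Cfg(n_embd=256).hidden_size == 256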
| 92 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Rename a checkpoint key, shifting its block number down by `offset`."""
    to_find = original_name.split('''.''' )[0]
    key_list = key.split('''.''' )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''', F'''block.{new_block_num}.{layer_num}.{new_name}''' )
    return key
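# --- illustrative aside (not part of the original script) ---
# Example of the renaming rule on a hypothetical checkpoint key:
# replace_key_with_offset('poolformer.encoder.2.0.mlp.fc1.weight', 1, 'mlp.fc1', 'output.conv1')
# -> 'poolformer.encoder.block.1.0.output.conv1.weight'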
def rename_keys(state_dict):
"""simple docstring"""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
_UpperCamelCase = key.replace('''network''', '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
_UpperCamelCase = key[: key.find('''proj''' )]
_UpperCamelCase = key.replace(__snake_case, F'''patch_embeddings.{total_embed_found}.''' )
_UpperCamelCase = key.replace('''proj''', '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
_UpperCamelCase = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
_UpperCamelCase = replace_key_with_offset(__snake_case, __snake_case, '''mlp.fc1''', '''output.conv1''' )
if "mlp.fc2" in key:
_UpperCamelCase = replace_key_with_offset(__snake_case, __snake_case, '''mlp.fc2''', '''output.conv2''' )
if "norm1" in key:
_UpperCamelCase = replace_key_with_offset(__snake_case, __snake_case, '''norm1''', '''before_norm''' )
if "norm2" in key:
_UpperCamelCase = replace_key_with_offset(__snake_case, __snake_case, '''norm2''', '''after_norm''' )
if "layer_scale_1" in key:
_UpperCamelCase = replace_key_with_offset(__snake_case, __snake_case, '''layer_scale_1''', '''layer_scale_1''' )
if "layer_scale_2" in key:
_UpperCamelCase = replace_key_with_offset(__snake_case, __snake_case, '''layer_scale_2''', '''layer_scale_2''' )
if "head" in key:
_UpperCamelCase = key.replace('''head''', '''classifier''' )
_UpperCamelCase = value
return new_state_dict
def prepare_img():
"""simple docstring"""
_UpperCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_UpperCamelCase = Image.open(requests.get(__snake_case, stream=__snake_case ).raw )
return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
"""simple docstring"""
_UpperCamelCase = PoolFormerConfig()
# set attributes based on model_name
_UpperCamelCase = '''huggingface/label-files'''
_UpperCamelCase = model_name[-3:]
_UpperCamelCase = 10_00
_UpperCamelCase = '''imagenet-1k-id2label.json'''
_UpperCamelCase = (1, 10_00)
# set config attributes
_UpperCamelCase = json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type='''dataset''' ), '''r''' ) )
_UpperCamelCase = {int(__snake_case ): v for k, v in idalabel.items()}
_UpperCamelCase = idalabel
_UpperCamelCase = {v: k for k, v in idalabel.items()}
if size == "s12":
_UpperCamelCase = [2, 2, 6, 2]
_UpperCamelCase = [64, 1_28, 3_20, 5_12]
_UpperCamelCase = 4.0
_UpperCamelCase = 0.9
elif size == "s24":
_UpperCamelCase = [4, 4, 12, 4]
_UpperCamelCase = [64, 1_28, 3_20, 5_12]
_UpperCamelCase = 4.0
_UpperCamelCase = 0.9
elif size == "s36":
_UpperCamelCase = [6, 6, 18, 6]
_UpperCamelCase = [64, 1_28, 3_20, 5_12]
_UpperCamelCase = 4.0
_UpperCamelCase = 1e-6
_UpperCamelCase = 0.9
elif size == "m36":
_UpperCamelCase = [6, 6, 18, 6]
_UpperCamelCase = [96, 1_92, 3_84, 7_68]
_UpperCamelCase = 4.0
_UpperCamelCase = 1e-6
_UpperCamelCase = 0.95
elif size == "m48":
_UpperCamelCase = [8, 8, 24, 8]
_UpperCamelCase = [96, 1_92, 3_84, 7_68]
_UpperCamelCase = 4.0
_UpperCamelCase = 1e-6
_UpperCamelCase = 0.95
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor
_UpperCamelCase = PoolFormerImageProcessor(crop_pct=__snake_case )
# Prepare image
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=__snake_case, return_tensors='''pt''' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
_UpperCamelCase = torch.load(__snake_case, map_location=torch.device('''cpu''' ) )
# rename keys
_UpperCamelCase = rename_keys(__snake_case )
# create HuggingFace model and load state dict
_UpperCamelCase = PoolFormerForImageClassification(__snake_case )
model.load_state_dict(__snake_case )
model.eval()
# Define image processor
_UpperCamelCase = PoolFormerImageProcessor(crop_pct=__snake_case )
_UpperCamelCase = image_processor(images=prepare_img(), return_tensors='''pt''' ).pixel_values
# forward pass
_UpperCamelCase = model(__snake_case )
_UpperCamelCase = outputs.logits
# define expected logit slices for different models
if size == "s12":
_UpperCamelCase = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
_UpperCamelCase = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
_UpperCamelCase = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
_UpperCamelCase = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
_UpperCamelCase = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(F'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3], __snake_case, atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
model.save_pretrained(__snake_case )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_a = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
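# Editor's note: a minimal, self-contained sketch (not the conversion script's
# exact logic) of the key-remapping pattern used above -- walk the original
# state dict, rewrite each key with a pure function, and collect the result.
# The helper name `toy_rename` and its single replacement rule are illustrative
# assumptions.
from collections import OrderedDict
def toy_rename(key):
    return key.replace("network", "poolformer.encoder") if key.startswith("network") else key
def remap_state_dict(state_dict):
    return OrderedDict((toy_rename(k), v) for k, v in state_dict.items())
# e.g. "network.0.proj.weight" -> "poolformer.encoder.0.proj.weight"; "head.weight" unchanged
print(remap_state_dict({"network.0.proj.weight": 0, "head.weight": 1}))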
| 19 | 0 |
"""simple docstring"""
from pathlib import Path
from typing import Any, List
import fire
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ :Any = Path(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[str] = Path(_SCREAMING_SNAKE_CASE )
dest_dir.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
for path in src_dir.iterdir():
lowerCAmelCase__ :Tuple = [x.rstrip() for x in list(path.open().readlines() )][:n]
lowerCAmelCase__ :Optional[Any] = dest_dir.joinpath(path.name )
print(_SCREAMING_SNAKE_CASE )
dest_path.open('w' ).write('\n'.join(_SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
fire.Fire(minify)
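# Editor's note: a hedged usage sketch for the truncation utility above, assuming
# the file is saved as minify.py (hypothetical paths below). python-fire maps
# positional CLI arguments onto the target function's parameters, so
#
#     python minify.py path/to/src_dir path/to/dest_dir 100
#
# copies the first 100 lines of every file in src_dir into dest_dir.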
| 93 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = (DPMSolverSDEScheduler,)
lowercase__ = 10
def UpperCAmelCase ( self , **__a) -> int:
'''simple docstring'''
_UpperCamelCase = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**__a)
return config
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=__a , beta_end=__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for i, t in enumerate(scheduler.timesteps):
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''')
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for i, t in enumerate(scheduler.timesteps):
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps , device=__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(__a) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a , use_karras_sigmas=__a)
scheduler.set_timesteps(self.num_inference_steps , device=__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(__a) * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
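# Editor's note: a minimal sketch of the sampling loop the tests above exercise,
# using the standard diffusers three-step pattern (scale input -> predict noise ->
# scheduler.step). `model` is a hypothetical noise predictor, not a real checkpoint.
import torch
from diffusers import DPMSolverSDEScheduler
def sample_with_sde_scheduler(model, shape, num_inference_steps=10, device="cpu"):
    scheduler = DPMSolverSDEScheduler(noise_sampler_seed=0)
    scheduler.set_timesteps(num_inference_steps, device=device)
    sample = torch.randn(shape, device=device) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample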
| 19 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
SCREAMING_SNAKE_CASE = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
SCREAMING_SNAKE_CASE = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def A__ ( self : Tuple ) -> Dict:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def A__ ( self : Optional[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Union[str, Any] =len(references[0] )
if any(len(UpperCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
lowercase : List[Any] =[[refs[i] for refs in references] for i in range(UpperCAmelCase )]
lowercase : int =TER(
normalized=UpperCAmelCase , no_punct=UpperCAmelCase , asian_support=UpperCAmelCase , case_sensitive=UpperCAmelCase , )
lowercase : Union[str, Any] =sb_ter.corpus_score(UpperCAmelCase , UpperCAmelCase )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
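# Editor's note: a small worked example of the reference "transposition" done in
# the compute method above -- sacrebleu wants one stream per reference position,
# while the metric accepts per-prediction reference lists (toy strings made up).
predictions = ["hello there", "general kenobi"]
references = [["hello there", "hi there"], ["general kenobi", "general who?"]]
references_per_prediction = len(references[0])
transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
print(transformed_references)  # [['hello there', 'general kenobi'], ['hi there', 'general who?']]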
| 94 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a = logging.get_logger(__name__)
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['pixel_values']
def __init__( self , __a = True , __a = None , __a = PILImageResampling.BICUBIC , __a = True , __a = True , __a = 1 / 2_55 , __a = None , __a = True , __a = None , __a = None , **__a , ) -> None:
'''simple docstring'''
super().__init__(**__a)
_UpperCamelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
_UpperCamelCase = get_size_dict(__a)
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_UpperCamelCase = get_size_dict(__a , default_to_square=__a , param_name='''crop_size''')
_UpperCamelCase = do_resize
_UpperCamelCase = do_rescale
_UpperCamelCase = do_normalize
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = rescale_factor
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase ( self , __a , __a , __a = PILImageResampling.BILINEAR , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(__a)
if "shortest_edge" in size:
_UpperCamelCase = get_resize_output_image_size(__a , size=size['''shortest_edge'''] , default_to_square=__a)
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_UpperCamelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''')
return resize(__a , size=__a , resample=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(__a)
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
return center_crop(__a , size=(size['''height'''], size['''width''']) , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a = None , **__a) -> np.ndarray:
'''simple docstring'''
return rescale(__a , scale=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> BatchFeature:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(__a , param_name='''crop_size''' , default_to_square=__a)
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(__a)
if not is_batched(__a):
_UpperCamelCase = [images]
if not valid_images(__a):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(__a) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=__a , size=__a , resample=__a) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=__a , size=__a) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=__a , scale=__a) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=__a , mean=__a , std=__a) for image in images]
_UpperCamelCase = [to_channel_dimension_format(__a , __a) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=__a , tensor_type=__a)
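# Editor's note: a numeric sketch of the rescale -> normalize -> channels-first
# steps performed by `preprocess` above, with hypothetical per-channel statistics
# instead of the IMAGENET defaults.
import numpy as np
image = np.full((2, 2, 3), 128, dtype=np.uint8)       # HWC uint8 input
rescaled = image.astype(np.float32) * (1 / 255)       # do_rescale with rescale_factor=1/255
mean, std = np.array([0.5] * 3), np.array([0.5] * 3)  # assumed stats
normalized = (rescaled - mean) / std                  # do_normalize
chw = normalized.transpose(2, 0, 1)                   # ChannelDimension.FIRST
print(chw.shape)  # (3, 2, 2)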
| 19 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCamelCase_ :
@staticmethod
def _SCREAMING_SNAKE_CASE ( *lowerCAmelCase_ : Any , **lowerCAmelCase_ : Dict ) -> str:
pass
def snake_case ( A__ ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
lowerCamelCase_ = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
__magic_name__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : int = pipeline(
"document-question-answering" , model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
UpperCAmelCase_ : int = INVOICE_URL
UpperCAmelCase_ : Union[str, Any] = list(zip(*apply_tesseract(load_image(lowerCAmelCase_ ) , lowerCAmelCase_ , "" ) ) )
UpperCAmelCase_ : Optional[Any] = "What is the placebo?"
UpperCAmelCase_ : Tuple = [
{
"image": load_image(lowerCAmelCase_ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] ) -> int:
UpperCAmelCase_ : Union[str, Any] = dqa_pipeline(lowerCAmelCase_ , top_k=2 )
self.assertEqual(
lowerCAmelCase_ , [
[
{"score": ANY(lowerCAmelCase_ ), "answer": ANY(lowerCAmelCase_ ), "start": ANY(lowerCAmelCase_ ), "end": ANY(lowerCAmelCase_ )},
{"score": ANY(lowerCAmelCase_ ), "answer": ANY(lowerCAmelCase_ ), "start": ANY(lowerCAmelCase_ ), "end": ANY(lowerCAmelCase_ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
UpperCAmelCase_ : Tuple = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
UpperCAmelCase_ : Dict = INVOICE_URL
UpperCAmelCase_ : int = "How many cats are there?"
UpperCAmelCase_ : Any = [
{"score": 0.0_0_0_1, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0_0_0_1, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
UpperCAmelCase_ : List[str] = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase_ , decimals=4 ) , lowerCAmelCase_ )
UpperCAmelCase_ : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase_ , decimals=4 ) , lowerCAmelCase_ )
        # No text is detected in this image by the OCR step, so layoutlmv2 should fail
        # and return an empty answer.
UpperCAmelCase_ : int = "./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ : Dict = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(lowerCAmelCase_ , [] )
        # We can optionally pass the words and bounding boxes directly
UpperCAmelCase_ : int = "./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Optional[Any] = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , words=lowerCAmelCase_ , boxes=lowerCAmelCase_ , top_k=2 )
self.assertEqual(lowerCAmelCase_ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
UpperCAmelCase_ : Dict = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
UpperCAmelCase_ : Optional[Any] = INVOICE_URL
UpperCAmelCase_ : Dict = "What is the invoice number?"
UpperCAmelCase_ : int = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCAmelCase_ : int = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCAmelCase_ : Any = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
UpperCAmelCase_ : Tuple = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
UpperCAmelCase_ : Tuple = INVOICE_URL
UpperCAmelCase_ : Any = "What is the invoice number?"
UpperCAmelCase_ : str = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCAmelCase_ : Optional[int] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCAmelCase_ : str = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase_ )
UpperCAmelCase_ : str = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase_ , revision="3dc6de3" , )
UpperCAmelCase_ : Any = INVOICE_URL
UpperCAmelCase_ : List[str] = "What is the invoice number?"
UpperCAmelCase_ : str = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
] , )
UpperCAmelCase_ : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
] , )
UpperCAmelCase_ : Union[str, Any] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
UpperCAmelCase_ : Dict = list(zip(*apply_tesseract(load_image(lowerCAmelCase_ ) , lowerCAmelCase_ , "" ) ) )
# This model should also work if `image` is set to None
UpperCAmelCase_ : List[str] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase_ )
UpperCAmelCase_ : str = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase_ , revision="3dc6de3" , max_seq_len=50 , )
UpperCAmelCase_ : List[Any] = INVOICE_URL
UpperCAmelCase_ : Optional[int] = "What is the invoice number?"
UpperCAmelCase_ : int = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCAmelCase_ : Tuple = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
UpperCAmelCase_ : Optional[int] = list(zip(*apply_tesseract(load_image(lowerCAmelCase_ ) , lowerCAmelCase_ , "" ) ) )
# This model should also work if `image` is set to None
UpperCAmelCase_ : Dict = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ : List[Any] = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
UpperCAmelCase_ : Optional[int] = INVOICE_URL
UpperCAmelCase_ : int = "What is the invoice number?"
UpperCAmelCase_ : List[str] = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase_ , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
pass
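# Editor's note: a hedged end-user sketch of the pipeline exercised above. The
# checkpoint name comes from these tests; network access and a local invoice.png
# (hypothetical path) are assumed.
from transformers import pipeline
dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
answers = dqa(image="invoice.png", question="What is the invoice number?", top_k=2)
# -> a list of {"score", "answer", "start", "end"} dicts, best answer first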
| 95 |
"""simple docstring"""
# Imports
import numpy as np
class _UpperCAmelCase:
def __init__( self , __a=None , __a=None , __a=None , __a=None , __a=None) -> Dict:
'''simple docstring'''
        self.set_matrices(red=__a , green=__a , blue=__a , red_edge=__a , nir=__a)
def UpperCAmelCase ( self , __a=None , __a=None , __a=None , __a=None , __a=None) -> Dict:
'''simple docstring'''
if red is not None:
_UpperCamelCase = red
if green is not None:
_UpperCamelCase = green
if blue is not None:
_UpperCamelCase = blue
if red_edge is not None:
_UpperCamelCase = red_edge
if nir is not None:
_UpperCamelCase = nir
return True
def UpperCAmelCase ( self , __a="" , __a=None , __a=None , __a=None , __a=None , __a=None) -> List[str]:
'''simple docstring'''
        self.set_matrices(red=__a , green=__a , blue=__a , red_edge=__a , nir=__a)
_UpperCamelCase = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''')
return False
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase ( self , __a=0.08 , __a=1.22 , __a=0.03) -> Optional[Any]:
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir / self.green) - 1
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return (self.red - self.blue) / self.red
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.nir - self.green
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def UpperCAmelCase ( self , __a=0.16) -> Optional[Any]:
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase ( self , __a=0.5) -> Dict:
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def UpperCAmelCase ( self , __a=None , __a=None) -> Any:
'''simple docstring'''
return (self.nir - b) / (a * self.red)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
_UpperCamelCase = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
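# Editor's note: a standalone example of the NDVI formula implemented above,
# (nir - red) / (nir + red), on toy reflectance bands (values made up).
import numpy as np
nir = np.array([0.8, 0.6, 0.3])
red = np.array([0.1, 0.2, 0.3])
print((nir - red) / (nir + red))  # [0.77777778 0.5        0.        ]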
| 19 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __A ( unittest.TestCase ):
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
__magic_name__: List[str] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__magic_name__: List[Any] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__magic_name__: Union[str, Any] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__magic_name__: Optional[int] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_6_0_0_0,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__magic_name__: int = tempfile.mkdtemp()
__magic_name__: Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__: Tuple = os.path.join(self.tmpdirname , __snake_case )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
# load decoder from hub
__magic_name__: Dict = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCamelCase__ ( self : Any , **__snake_case : str ) -> Optional[int]:
__magic_name__: Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__snake_case )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCamelCase__ ( self : str , **__snake_case : int ) -> Dict:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCamelCase__ ( self : int , **__snake_case : List[str] ) -> int:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__magic_name__: Dict = self.get_tokenizer()
__magic_name__: Any = self.get_feature_extractor()
__magic_name__: Tuple = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
processor.save_pretrained(self.tmpdirname )
__magic_name__: Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __snake_case )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __snake_case )
def lowerCamelCase__ ( self : Any ) -> Tuple:
__magic_name__: Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # make sure the additional decoder parameters passed to from_pretrained are picked up
__magic_name__: int = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
__magic_name__: Union[str, Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__snake_case , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__snake_case , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Optional[Any] = self.get_tokenizer()
__magic_name__: List[Any] = self.get_decoder()
__magic_name__: int = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Tuple = floats_list((3, 1_0_0_0) )
__magic_name__: List[str] = feature_extractor(__snake_case , return_tensors="""np""" )
__magic_name__: Tuple = processor(__snake_case , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
__magic_name__: Tuple = self.get_feature_extractor()
__magic_name__: List[str] = self.get_tokenizer()
__magic_name__: str = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Optional[int] = """This is a test string"""
__magic_name__: List[str] = processor(text=__snake_case )
__magic_name__: Tuple = tokenizer(__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ ( self : int , __snake_case : List[str]=(2, 1_0, 1_6) , __snake_case : List[Any]=7_7 ) -> Dict:
np.random.seed(__snake_case )
return np.random.rand(*__snake_case )
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Tuple = self.get_tokenizer()
__magic_name__: Any = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: List[Any] = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
__magic_name__: str = processor.decode(__snake_case )
__magic_name__: Optional[int] = decoder.decode_beams(__snake_case )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCamelCase__ ( self : int , __snake_case : Dict ) -> Any:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: List[Any] = self.get_tokenizer()
__magic_name__: int = self.get_decoder()
__magic_name__: Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Optional[int] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__magic_name__: Optional[int] = processor.batch_decode(__snake_case )
else:
with get_context(__snake_case ).Pool() as pool:
__magic_name__: Any = processor.batch_decode(__snake_case , __snake_case )
__magic_name__: Dict = list(__snake_case )
with get_context("""fork""" ).Pool() as p:
__magic_name__: List[str] = decoder.decode_beams_batch(__snake_case , __snake_case )
__magic_name__, __magic_name__, __magic_name__: Optional[int] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__snake_case , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__snake_case , decoded_processor.logit_score )
self.assertListEqual(__snake_case , decoded_processor.lm_score )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
__magic_name__: List[str] = self.get_feature_extractor()
__magic_name__: Optional[Any] = self.get_tokenizer()
__magic_name__: Optional[int] = self.get_decoder()
__magic_name__: Dict = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: str = self._get_dummy_logits()
__magic_name__: Dict = 1_5
__magic_name__: int = -20.0
__magic_name__: int = -4.0
__magic_name__: Dict = processor.batch_decode(
__snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
__magic_name__: Optional[int] = decoded_processor_out.text
__magic_name__: Union[str, Any] = list(__snake_case )
with get_context("""fork""" ).Pool() as pool:
__magic_name__: str = decoder.decode_beams_batch(
__snake_case , __snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
__magic_name__: Any = [d[0][0] for d in decoded_decoder_out]
__magic_name__: Optional[int] = [d[0][2] for d in decoded_decoder_out]
__magic_name__: Optional[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __snake_case )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __snake_case , atol=1E-3 ) )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __snake_case , atol=1E-3 ) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Any = self.get_tokenizer()
__magic_name__: Union[str, Any] = self.get_decoder()
__magic_name__: str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Any = self._get_dummy_logits()
__magic_name__: Union[str, Any] = 2.0
__magic_name__: Optional[Any] = 5.0
__magic_name__: Optional[Any] = -20.0
__magic_name__: List[str] = True
__magic_name__: List[Any] = processor.batch_decode(
__snake_case , alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
__magic_name__: Union[str, Any] = decoded_processor_out.text
__magic_name__: Union[str, Any] = list(__snake_case )
decoder.reset_params(
alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
with get_context("""fork""" ).Pool() as pool:
__magic_name__: str = decoder.decode_beams_batch(
__snake_case , __snake_case , )
__magic_name__: List[str] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __snake_case )
__magic_name__: List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__: Union[str, Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__magic_name__: Optional[int] = os.listdir(__snake_case )
__magic_name__: Union[str, Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: int = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained(__snake_case )
__magic_name__: Any = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__: int = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__magic_name__: str = os.listdir(__snake_case )
__magic_name__: Tuple = os.listdir(__snake_case )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that both the decoder from the hub and the local files in the cache are the same
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase__ ( self : Optional[int] ) -> int:
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[str] = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[str] = floats_list((3, 1_0_0_0) )
__magic_name__: Tuple = processor_wavaveca(__snake_case , return_tensors="""np""" )
__magic_name__: Optional[Any] = processor_auto(__snake_case , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__magic_name__: int = self._get_dummy_logits()
__magic_name__: List[Any] = processor_wavaveca.batch_decode(__snake_case )
__magic_name__: Union[str, Any] = processor_auto.batch_decode(__snake_case )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
__magic_name__: Optional[int] = self.get_feature_extractor()
__magic_name__: Any = self.get_tokenizer()
__magic_name__: Dict = self.get_decoder()
__magic_name__: List[str] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCamelCase__ ( __snake_case : Optional[int] , __snake_case : int ) -> int:
__magic_name__: Any = [d[key] for d in offsets]
return retrieved_list
def lowerCamelCase__ ( self : str ) -> Union[str, Any]:
__magic_name__: Tuple = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Tuple = self._get_dummy_logits()[0]
__magic_name__: List[Any] = processor.decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
__magic_name__: Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Optional[int] = self._get_dummy_logits()
__magic_name__: Any = processor.batch_decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
import torch
__magic_name__: List[Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__snake_case )
__magic_name__: Dict = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
__magic_name__: Any = iter(__snake_case )
__magic_name__: Optional[int] = next(__snake_case )
__magic_name__: Optional[int] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__magic_name__: Tuple = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__magic_name__: List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__magic_name__: List[Any] = model(__snake_case ).logits.cpu().numpy()
__magic_name__: Optional[Any] = processor.decode(logits[0] , output_word_offsets=__snake_case )
__magic_name__: List[str] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__magic_name__: str = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__magic_name__: Tuple = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) , __snake_case )
self.assertEqual(""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) , output.text )
# output times
__magic_name__: Dict = torch.tensor(self.get_from_offsets(__snake_case , """start_time""" ) )
__magic_name__: Optional[Any] = torch.tensor(self.get_from_offsets(__snake_case , """end_time""" ) )
# fmt: off
__magic_name__: Tuple = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__magic_name__: int = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
| 96 |
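A minimal sketch of the offset-to-timestamp arithmetic the integration test above relies on, assuming a Wav2Vec2-style model whose feature encoder has a total stride of 320 over 16 kHz audio; the offsets in the usage line are hypothetical example data, not values taken from the test.

def offsets_to_times(word_offsets, inputs_to_logits_ratio=320, sampling_rate=16_000):
    # One logit frame spans inputs_to_logits_ratio input samples, i.e. 0.02 s here.
    time_per_frame = inputs_to_logits_ratio / sampling_rate
    return [
        {
            "word": d["word"],
            "start_time": d["start_offset"] * time_per_frame,
            "end_time": d["end_offset"] * time_per_frame,
        }
        for d in word_offsets
    ]

print(offsets_to_times([{"word": "WHY", "start_offset": 71, "end_offset": 77}]))
# -> start_time 1.42 s, end_time 1.54 s (up to float rounding)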
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=64 , __a=2 , __a=3 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a=[1, 16, 4, 4] , __a=None , ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_UpperCamelCase = (self.image_size // 32) ** 2
_UpperCamelCase = num_patches + 1
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__a , )
def UpperCAmelCase ( self , __a , __a , __a) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ViTHybridModel(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.type_sequence_label_size
_UpperCamelCase = ViTHybridForImageClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowercase__ = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ViTHybridModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''')
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear))
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = _config_zero_init(__a)
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(config=__a)
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
_UpperCamelCase = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = ViTHybridModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
__a)
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=__a , return_tensors='''pt''').to(__a)
# forward pass
with torch.no_grad():
_UpperCamelCase = model(**__a)
# verify the logits
_UpperCamelCase = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __a)
_UpperCamelCase = torch.tensor([-1.9090, -0.4993, -0.2389]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4))
@slow
@require_accelerate
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''')
_UpperCamelCase = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''')
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=__a , return_tensors='''pt''')
_UpperCamelCase = model(**__a)
_UpperCamelCase = outputs.logits
# model predicts one of the 1000 ImageNet classes
_UpperCamelCase = logits.argmax(-1).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''')
| 19 | 0 |
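A short sketch of the sequence-length computation the ViT hybrid tester above documents in its comment: the backbone downsamples the input by a stride of 32, and one [CLS] token is prepended to the resulting patches. The concrete image sizes are illustrative.

def vit_hybrid_seq_length(image_size: int, backbone_stride: int = 32) -> int:
    num_patches = (image_size // backbone_stride) ** 2
    return num_patches + 1  # +1 for the [CLS] token

assert vit_hybrid_seq_length(64) == 5     # 2x2 feature map -> 4 patches + [CLS]
assert vit_hybrid_seq_length(384) == 145  # 12x12 feature map -> 144 patches + [CLS]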
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :List[str] = ['image_processor']
a :Tuple = 'SamImageProcessor'
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : int ) -> List[str]:
super().__init__(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.image_processor
lowercase_ = -1_0
lowercase_ = self.image_processor.size['''longest_edge''']
def __call__( self : Any , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> BatchEncoding:
lowercase_ = self.image_processor(
SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        # pop arguments that are not used in the forward but are used nevertheless
lowercase_ = encoding_image_processor['''original_sizes''']
if hasattr(SCREAMING_SNAKE_CASE_ , '''numpy''' ): # Checks if Torch or TF tensor
lowercase_ = original_sizes.numpy()
lowercase_ , lowercase_ , lowercase_ = self._check_and_preprocess_points(
input_points=SCREAMING_SNAKE_CASE_ , input_labels=SCREAMING_SNAKE_CASE_ , input_boxes=SCREAMING_SNAKE_CASE_ , )
lowercase_ = self._normalize_and_convert(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , input_points=SCREAMING_SNAKE_CASE_ , input_labels=SCREAMING_SNAKE_CASE_ , input_boxes=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , )
return encoding_image_processor
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : List[str]="pt" , ) -> Dict:
if input_points is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
lowercase_ = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , original_sizes[0] ) for point in input_points
]
else:
lowercase_ = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for point, original_size in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
lowercase_ , lowercase_ = self._pad_points_and_labels(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = np.array(SCREAMING_SNAKE_CASE_ )
if input_labels is not None:
lowercase_ = np.array(SCREAMING_SNAKE_CASE_ )
if input_boxes is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
lowercase_ = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , original_sizes[0] , is_bounding_box=SCREAMING_SNAKE_CASE_ )
for box in input_boxes
]
else:
lowercase_ = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , is_bounding_box=SCREAMING_SNAKE_CASE_ )
for box, original_size in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
lowercase_ = np.array(SCREAMING_SNAKE_CASE_ )
if input_boxes is not None:
if return_tensors == "pt":
lowercase_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# boxes batch size of 1 by default
lowercase_ = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
lowercase_ = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
# boxes batch size of 1 by default
lowercase_ = tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'''input_boxes''': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
lowercase_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
lowercase_ = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
lowercase_ = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
lowercase_ = tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'''input_points''': input_points} )
if input_labels is not None:
if return_tensors == "pt":
lowercase_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
lowercase_ = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
lowercase_ = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
lowercase_ = tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'''input_labels''': input_labels} )
return encoding_image_processor
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict ) -> Union[str, Any]:
lowercase_ = max([point.shape[0] for point in input_points] )
lowercase_ = []
for i, point in enumerate(SCREAMING_SNAKE_CASE_ ):
if point.shape[0] != expected_nb_points:
lowercase_ = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
lowercase_ = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(SCREAMING_SNAKE_CASE_ )
lowercase_ = processed_input_points
return input_points, input_labels
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Dict=False ) -> np.ndarray:
lowercase_ , lowercase_ = original_size
lowercase_ , lowercase_ = self.image_processor._get_preprocess_shape(SCREAMING_SNAKE_CASE_ , longest_edge=SCREAMING_SNAKE_CASE_ )
lowercase_ = deepcopy(SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
if is_bounding_box:
lowercase_ = coords.reshape(-1 , 2 , 2 )
lowercase_ = coords[..., 0] * (new_w / old_w)
lowercase_ = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
lowercase_ = coords.reshape(-1 , 4 )
return coords
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , ) -> List[str]:
if input_points is not None:
if hasattr(SCREAMING_SNAKE_CASE_ , '''numpy''' ): # Checks for TF or Torch tensor
lowercase_ = input_points.numpy().tolist()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not isinstance(input_points[0] , SCREAMING_SNAKE_CASE_ ):
raise ValueError('''Input points must be a list of list of floating points.''' )
lowercase_ = [np.array(SCREAMING_SNAKE_CASE_ ) for input_point in input_points]
else:
lowercase_ = None
if input_labels is not None:
if hasattr(SCREAMING_SNAKE_CASE_ , '''numpy''' ):
lowercase_ = input_labels.numpy().tolist()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not isinstance(input_labels[0] , SCREAMING_SNAKE_CASE_ ):
                raise ValueError('''Input labels must be a list of list of integers.''' )
lowercase_ = [np.array(SCREAMING_SNAKE_CASE_ ) for label in input_labels]
else:
lowercase_ = None
if input_boxes is not None:
if hasattr(SCREAMING_SNAKE_CASE_ , '''numpy''' ):
lowercase_ = input_boxes.numpy().tolist()
if (
not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
or not isinstance(input_boxes[0] , SCREAMING_SNAKE_CASE_ )
or not isinstance(input_boxes[0][0] , SCREAMING_SNAKE_CASE_ )
):
raise ValueError('''Input boxes must be a list of list of list of floating points.''' )
lowercase_ = [np.array(SCREAMING_SNAKE_CASE_ ).astype(np.floataa ) for box in input_boxes]
else:
lowercase_ = None
return input_points, input_labels, input_boxes
@property
def _lowercase ( self : int ) -> List[Any]:
lowercase_ = self.image_processor.model_input_names
return list(dict.fromkeys(SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
return self.image_processor.post_process_masks(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 97 |
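A standalone sketch of the coordinate rescaling performed by `_normalize_coordinates` above: points given in the original image frame are mapped into the resized frame whose longest edge equals `longest_edge`. The rounding rule in `get_preprocess_shape` mirrors SAM's resize logic as I understand it; the sample point is made up.

import numpy as np

def get_preprocess_shape(old_h: int, old_w: int, longest_edge: int) -> tuple:
    scale = longest_edge / max(old_h, old_w)
    return int(old_h * scale + 0.5), int(old_w * scale + 0.5)

def normalize_coords(coords: np.ndarray, original_size: tuple, longest_edge: int) -> np.ndarray:
    old_h, old_w = original_size
    new_h, new_w = get_preprocess_shape(old_h, old_w, longest_edge)
    coords = coords.astype(float).copy()
    coords[..., 0] *= new_w / old_w  # x coordinates
    coords[..., 1] *= new_h / old_h  # y coordinates
    return coords

print(normalize_coords(np.array([[250.0, 125.0]]), (500, 750), 1024))
# -> approximately [[341.33, 170.75]]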
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['vqvae']
def __init__( self , __a , __a , __a , __a , ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__a , scheduler=__a , mel=__a , vqvae=__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return 50 if isinstance(self.scheduler , __a) else 10_00
@torch.no_grad()
def __call__( self , __a = 1 , __a = None , __a = None , __a = 0 , __a = 0 , __a = None , __a = None , __a = 0 , __a = 0 , __a = None , __a = 0 , __a = None , __a = None , __a=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
_UpperCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(__a)
_UpperCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
_UpperCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_UpperCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__a , device=self.device , )
_UpperCamelCase = noise
_UpperCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__a , __a)
_UpperCamelCase = self.mel.audio_slice_to_image(__a)
_UpperCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''').reshape(
(input_image.height, input_image.width))
_UpperCamelCase = (input_image / 2_55) * 2 - 1
_UpperCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
_UpperCamelCase = self.vqvae.encode(torch.unsqueeze(__a , 0)).latent_dist.sample(
generator=__a)[0]
_UpperCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_UpperCamelCase = self.scheduler.add_noise(__a , __a , self.scheduler.timesteps[start_step - 1])
_UpperCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_UpperCamelCase = int(mask_start_secs * pixels_per_second)
_UpperCamelCase = int(mask_end_secs * pixels_per_second)
_UpperCamelCase = self.scheduler.add_noise(__a , __a , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , __a):
_UpperCamelCase = self.unet(__a , __a , __a)['''sample''']
else:
_UpperCamelCase = self.unet(__a , __a)['''sample''']
if isinstance(self.scheduler , __a):
_UpperCamelCase = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , eta=__a , generator=__a , )['''prev_sample''']
else:
_UpperCamelCase = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , generator=__a , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_UpperCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_UpperCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_UpperCamelCase = 1 / self.vqvae.config.scaling_factor * images
_UpperCamelCase = self.vqvae.decode(__a)['''sample''']
_UpperCamelCase = (images / 2 + 0.5).clamp(0 , 1)
_UpperCamelCase = images.cpu().permute(0 , 2 , 3 , 1).numpy()
_UpperCamelCase = (images * 2_55).round().astype('''uint8''')
_UpperCamelCase = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''').convert('''L''') for _ in images))
_UpperCamelCase = [self.mel.image_to_audio(__a) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__a)[:, np.newaxis, :]) , **ImagePipelineOutput(__a))
@torch.no_grad()
def UpperCAmelCase ( self , __a , __a = 50) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , __a)
self.scheduler.set_timesteps(__a)
_UpperCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
_UpperCamelCase = (sample / 2_55) * 2 - 1
_UpperCamelCase = torch.Tensor(__a).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
_UpperCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_UpperCamelCase = self.scheduler.alphas_cumprod[t]
_UpperCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_UpperCamelCase = 1 - alpha_prod_t
_UpperCamelCase = self.unet(__a , __a)['''sample''']
_UpperCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_UpperCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_UpperCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCAmelCase ( __a , __a , __a) -> torch.Tensor:
'''simple docstring'''
_UpperCamelCase = acos(torch.dot(torch.flatten(__a) , torch.flatten(__a)) / torch.norm(__a) / torch.norm(__a))
return sin((1 - alpha) * theta) * xa / sin(__a) + sin(alpha * theta) * xa / sin(__a)
| 19 | 0 |
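A self-contained sketch of the spherical linear interpolation (slerp) the pipeline above exposes as a static method, blending two noise tensors by a factor alpha in [0, 1]; it assumes the inputs are not (anti-)parallel, since sin(theta) appears in the denominator.

import torch
from math import acos, sin

def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # Angle between the two flattened tensors.
    theta = acos(
        torch.dot(torch.flatten(x0), torch.flatten(x1))
        / (torch.norm(x0) * torch.norm(x1))
    )
    return (sin((1 - alpha) * theta) * x0 + sin(alpha * theta) * x1) / sin(theta)

x0, x1 = torch.randn(4), torch.randn(4)
assert torch.allclose(slerp(x0, x1, 0.0), x0, atol=1e-5)  # alpha=0 returns x0
assert torch.allclose(slerp(x0, x1, 1.0), x1, atol=1e-5)  # alpha=1 returns x1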
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any]=13 , lowerCAmelCase__ : List[str]=30 , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : Optional[Any]=3 , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Tuple=32 , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : int=4 , lowerCAmelCase__ : List[str]=37 , lowerCAmelCase__ : Optional[Any]="gelu" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : str=10 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Any=0.6 , lowerCAmelCase__ : Optional[Any]=None , ) -> str:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = mask_ratio
_UpperCamelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_UpperCamelCase = (image_size // patch_size) ** 2
_UpperCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def snake_case__ ( self : Dict ) -> int:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case__ ( self : Tuple , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = TFViTMAEModel(config=lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ , training=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = TFViTMAEForPreTraining(lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ , training=lowerCAmelCase__ )
# expected sequence length = num_patches
_UpperCamelCase = (self.image_size // self.patch_size) ** 2
_UpperCamelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_UpperCamelCase = 1
_UpperCamelCase = TFViTMAEForPreTraining(lowerCAmelCase__ )
_UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase = model(lowerCAmelCase__ , training=lowerCAmelCase__ )
_UpperCamelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case__ ( self : str ) -> int:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_snake_case : int = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
_snake_case : Any = False
_snake_case : Dict = False
_snake_case : Any = False
_snake_case : List[Any] = False
def snake_case__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = TFViTMAEModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def snake_case__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def snake_case__ ( self : Any ) -> Tuple:
'''simple docstring'''
pass
def snake_case__ ( self : Dict ) -> Dict:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , tf.keras.layers.Layer ) )
def snake_case__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ )
_UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def snake_case__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase__ )
def snake_case__ ( self : str ) -> List[str]:
'''simple docstring'''
np.random.seed(2 )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ )
_UpperCamelCase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
_UpperCamelCase = copy.deepcopy(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCamelCase = model(**lowerCAmelCase__ , noise=lowerCAmelCase__ )
_UpperCamelCase = outputs_dict[0].numpy()
_UpperCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def snake_case__ ( self : Dict ) -> int:
'''simple docstring'''
np.random.seed(2 )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCAmelCase__ : Optional[Any] ):
_UpperCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCAmelCase__ ):
_UpperCamelCase = v.numpy()
else:
_UpperCamelCase = np.array(lowerCAmelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ )
_UpperCamelCase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = prepare_numpy_arrays(lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
_UpperCamelCase = model(**lowerCAmelCase__ , noise=lowerCAmelCase__ )
self.assert_outputs_same(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
np.random.seed(2 )
_UpperCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_UpperCamelCase = tf.constant(lowerCAmelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_UpperCamelCase = tf_noise
super().check_pt_tf_models(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
np.random.seed(2 )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCAmelCase__ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(lowerCAmelCase__ , lowerCAmelCase__ ),)
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCAmelCase__ , '''_keras_serializable''' , lowerCAmelCase__ )
}
_UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_UpperCamelCase = tf.convert_to_tensor(lowerCAmelCase__ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
_UpperCamelCase = main_layer_class(lowerCAmelCase__ )
_UpperCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_UpperCamelCase = tf.keras.Model(lowerCAmelCase__ , outputs=main_layer(lowerCAmelCase__ ) )
_UpperCamelCase = model(lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = os.path.join(lowerCAmelCase__ , '''keras_model.h5''' )
model.save(lowerCAmelCase__ )
_UpperCamelCase = tf.keras.models.load_model(
lowerCAmelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCAmelCase__ , tf.keras.Model )
_UpperCamelCase = model(lowerCAmelCase__ )
self.assert_outputs_same(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
np.random.seed(2 )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ )
_UpperCamelCase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_UpperCamelCase = outputs.last_hidden_state.numpy()
_UpperCamelCase = 0
else:
_UpperCamelCase = outputs.logits.numpy()
_UpperCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ , saved_model=lowerCAmelCase__ )
_UpperCamelCase = model_class.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_UpperCamelCase = after_outputs['''last_hidden_state'''].numpy()
_UpperCamelCase = 0
else:
_UpperCamelCase = after_outputs['''logits'''].numpy()
_UpperCamelCase = 0
_UpperCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1e-5 )
def snake_case__ ( self : str ) -> List[Any]:
'''simple docstring'''
np.random.seed(2 )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ )
_UpperCamelCase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
_UpperCamelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowerCAmelCase__ )
_UpperCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_UpperCamelCase = model_class.from_config(model.config )
_UpperCamelCase = new_model(lowerCAmelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
_UpperCamelCase = new_model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
self.assert_outputs_same(lowerCAmelCase__ , lowerCAmelCase__ )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def snake_case__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def snake_case__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
def snake_case__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(lowerCAmelCase__ )
def a__ ( ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case__ ( self : int ) -> Any:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def snake_case__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
np.random.seed(2 )
_UpperCamelCase = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=lowerCAmelCase__ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_UpperCamelCase = ViTMAEConfig()
_UpperCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_UpperCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
_UpperCamelCase = model(**lowerCAmelCase__ , noise=lowerCAmelCase__ )
# verify the logits
_UpperCamelCase = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
_UpperCamelCase = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCAmelCase__ , atol=1e-4 )
| 98 |
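A sketch of the ViTMAE sequence-length arithmetic used by the tester above: with a fraction mask_ratio of the (num_patches + 1) tokens masked out, the encoder sees the rounded-up remainder. The assert reproduces the tester's defaults (image_size=30, patch_size=2, mask_ratio=0.6).

import math

def vitmae_visible_tokens(image_size: int, patch_size: int, mask_ratio: float) -> int:
    num_patches = (image_size // patch_size) ** 2
    return int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

assert vitmae_visible_tokens(image_size=30, patch_size=2, mask_ratio=0.6) == 91  # ceil(0.4 * 226)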
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
_a = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'detr'
lowercase__ = ['past_key_values']
lowercase__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __a=True , __a=None , __a=3 , __a=1_00 , __a=6 , __a=20_48 , __a=8 , __a=6 , __a=20_48 , __a=8 , __a=0.0 , __a=0.0 , __a=True , __a="relu" , __a=2_56 , __a=0.1 , __a=0.0 , __a=0.0 , __a=0.02 , __a=1.0 , __a=False , __a="sine" , __a="resnet50" , __a=True , __a=False , __a=1 , __a=5 , __a=2 , __a=1 , __a=1 , __a=5 , __a=2 , __a=0.1 , **__a , ) -> int:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(__a , __a):
_UpperCamelCase = backbone_config.get('''model_type''')
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(__a)
# set timm attributes to None
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None, None, None
_UpperCamelCase = use_timm_backbone
_UpperCamelCase = backbone_config
_UpperCamelCase = num_channels
_UpperCamelCase = num_queries
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = encoder_layers
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
_UpperCamelCase = backbone
_UpperCamelCase = use_pretrained_backbone
_UpperCamelCase = dilation
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=__a , **__a)
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.d_model
@classmethod
def UpperCAmelCase ( cls , __a , **__a) -> int:
'''simple docstring'''
return cls(backbone_config=__a , **__a)
def UpperCAmelCase ( self) -> Dict[str, any]:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = version.parse('1.11' )
@property
def UpperCAmelCase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
def UpperCAmelCase ( self) -> float:
'''simple docstring'''
return 1e-5
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return 12
| 19 | 0 |
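A simplified stand-in for the `attribute_map` aliasing the DETR config above declares, showing how a read of `hidden_size` can be redirected to `d_model`; this is a sketch of the idea, not the real `PretrainedConfig` machinery.

class AliasedConfig:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self, d_model: int = 256):
        self.d_model = d_model

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails, so real attributes win.
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

cfg = AliasedConfig(d_model=256)
assert cfg.hidden_size == 256  # alias resolves to d_model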
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 99 |
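A stripped-down sketch of the lazy-import idea behind `_LazyModule` above, using a module-level __getattr__ (PEP 562) instead of transformers' helper; here the stdlib `json` stands in for the heavyweight torch-backed submodule, and nothing is imported until first attribute access on the file once it is imported as a module.

import importlib

_LAZY_MODULES = {"json": "json"}

def __getattr__(name):
    target = _LAZY_MODULES.get(name)
    if target is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(target)
    globals()[name] = module  # cache so __getattr__ only runs once per name
    return module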
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'wavlm'
def __init__( self , __a=32 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.1 , __a=0.02 , __a=1e-5 , __a="group" , __a="gelu" , __a=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __a=(5, 2, 2, 2, 2, 2, 2) , __a=(10, 3, 3, 3, 3, 2, 2) , __a=False , __a=1_28 , __a=16 , __a=3_20 , __a=8_00 , __a=False , __a=True , __a=0.05 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=3_20 , __a=2 , __a=0.1 , __a=1_00 , __a=2_56 , __a=2_56 , __a=0.1 , __a="mean" , __a=False , __a=False , __a=2_56 , __a=(5_12, 5_12, 5_12, 5_12, 15_00) , __a=(5, 3, 3, 1, 1) , __a=(1, 2, 3, 1, 1) , __a=5_12 , __a=80 , __a=0 , __a=1 , __a=2 , __a=False , __a=3 , __a=2 , __a=3 , __a=None , **__a , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a)
_UpperCamelCase = hidden_size
_UpperCamelCase = feat_extract_norm
_UpperCamelCase = feat_extract_activation
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = conv_bias
_UpperCamelCase = num_buckets
_UpperCamelCase = max_bucket_distance
_UpperCamelCase = num_conv_pos_embeddings
_UpperCamelCase = num_conv_pos_embedding_groups
_UpperCamelCase = len(self.conv_dim)
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = feat_proj_dropout
_UpperCamelCase = final_dropout
_UpperCamelCase = layerdrop
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
_UpperCamelCase = num_ctc_classes
_UpperCamelCase = vocab_size
_UpperCamelCase = do_stable_layer_norm
_UpperCamelCase = use_weighted_layer_sum
_UpperCamelCase = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase = apply_spec_augment
_UpperCamelCase = mask_time_prob
_UpperCamelCase = mask_time_length
_UpperCamelCase = mask_time_min_masks
_UpperCamelCase = mask_feature_prob
_UpperCamelCase = mask_feature_length
# parameters for pretraining with codevector quantized representations
_UpperCamelCase = num_codevectors_per_group
_UpperCamelCase = num_codevector_groups
_UpperCamelCase = contrastive_logits_temperature
_UpperCamelCase = num_negatives
_UpperCamelCase = codevector_dim
_UpperCamelCase = proj_codevector_dim
_UpperCamelCase = diversity_loss_weight
# ctc loss
_UpperCamelCase = ctc_loss_reduction
_UpperCamelCase = ctc_zero_infinity
# adapter
_UpperCamelCase = add_adapter
_UpperCamelCase = adapter_kernel_size
_UpperCamelCase = adapter_stride
_UpperCamelCase = num_adapter_layers
_UpperCamelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = xvector_output_dim
@property
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
| 19 | 0 |
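A one-line sketch of the stride-product property at the end of the WavLM config above: the feature encoder's total downsampling factor is the product of the per-layer conv strides, so with the default strides one logit frame covers 320 input samples.

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default conv_stride from the config above
ratio = functools.reduce(operator.mul, conv_stride, 1)
assert ratio == 320  # 5 * 2**6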
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self , A_ , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = jnp.ones((batch_size, length) ) / length
return scores
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = 20
SCREAMING_SNAKE_CASE__ = self._get_uniform_logits(batch_size=2 , length=A_ )
# tweak scores to not be uniform anymore
SCREAMING_SNAKE_CASE__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
SCREAMING_SNAKE_CASE__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
SCREAMING_SNAKE_CASE__ = jax.nn.softmax(A_ , axis=-1 )
SCREAMING_SNAKE_CASE__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
SCREAMING_SNAKE_CASE__ = jax.nn.softmax(temp_dist_warper_sharper(A_ , scores.copy() , cur_len=A_ ) , axis=-1 )
SCREAMING_SNAKE_CASE__ = jax.nn.softmax(temp_dist_warper_smoother(A_ , scores.copy() , cur_len=A_ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = 10
SCREAMING_SNAKE_CASE__ = 2
# create ramp distribution
SCREAMING_SNAKE_CASE__ = np.broadcast_to(np.arange(A_ )[None, :] , (batch_size, vocab_size) ).copy()
SCREAMING_SNAKE_CASE__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
SCREAMING_SNAKE_CASE__ = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE__ = top_k_warp(A_ , A_ , cur_len=A_ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
SCREAMING_SNAKE_CASE__ = 5
SCREAMING_SNAKE_CASE__ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
SCREAMING_SNAKE_CASE__ = np.broadcast_to(np.arange(A_ )[None, :] , (batch_size, length) ).copy()
SCREAMING_SNAKE_CASE__ = top_k_warp_safety_check(A_ , A_ , cur_len=A_ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = 10
SCREAMING_SNAKE_CASE__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
SCREAMING_SNAKE_CASE__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
SCREAMING_SNAKE_CASE__ = FlaxTopPLogitsWarper(0.8 )
SCREAMING_SNAKE_CASE__ = np.exp(top_p_warp(A_ , A_ , cur_len=A_ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
SCREAMING_SNAKE_CASE__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
SCREAMING_SNAKE_CASE__ = np.broadcast_to(np.arange(A_ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
SCREAMING_SNAKE_CASE__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
SCREAMING_SNAKE_CASE__ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
SCREAMING_SNAKE_CASE__ = top_p_warp(A_ , A_ , cur_len=A_ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 20
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=A_ )
# check that min length is applied at length 5
SCREAMING_SNAKE_CASE__ = ids_tensor((batch_size, 20) , vocab_size=20 )
SCREAMING_SNAKE_CASE__ = 5
SCREAMING_SNAKE_CASE__ = self._get_uniform_logits(A_ , A_ )
SCREAMING_SNAKE_CASE__ = min_dist_processor(A_ , A_ , cur_len=A_ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
SCREAMING_SNAKE_CASE__ = self._get_uniform_logits(A_ , A_ )
SCREAMING_SNAKE_CASE__ = 15
SCREAMING_SNAKE_CASE__ = min_dist_processor(A_ , A_ , cur_len=A_ )
self.assertFalse(jnp.isinf(A_ ).any() )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 20
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A_ )
# check that all scores are -inf except the bos_token_id score
SCREAMING_SNAKE_CASE__ = ids_tensor((batch_size, 1) , vocab_size=20 )
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = self._get_uniform_logits(A_ , A_ )
SCREAMING_SNAKE_CASE__ = logits_processor(A_ , A_ , cur_len=A_ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = self._get_uniform_logits(A_ , A_ )
SCREAMING_SNAKE_CASE__ = logits_processor(A_ , A_ , cur_len=A_ )
self.assertFalse(jnp.isinf(A_ ).any() )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 20
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 5
SCREAMING_SNAKE_CASE__ = FlaxForcedEOSTokenLogitsProcessor(max_length=A_ , eos_token_id=A_ )
# check that all scores are -inf except the eos_token_id when max_length is reached
SCREAMING_SNAKE_CASE__ = ids_tensor((batch_size, 4) , vocab_size=20 )
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = self._get_uniform_logits(A_ , A_ )
SCREAMING_SNAKE_CASE__ = logits_processor(A_ , A_ , cur_len=A_ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = self._get_uniform_logits(A_ , A_ )
SCREAMING_SNAKE_CASE__ = logits_processor(A_ , A_ , cur_len=A_ )
self.assertFalse(jnp.isinf(A_ ).any() )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 10
SCREAMING_SNAKE_CASE__ = 15
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE__ = ids_tensor((batch_size, sequence_length) , A_ )
SCREAMING_SNAKE_CASE__ = input_ids.copy()
SCREAMING_SNAKE_CASE__ = self._get_uniform_logits(A_ , A_ )
SCREAMING_SNAKE_CASE__ = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE__ = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
SCREAMING_SNAKE_CASE__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=A_ )
SCREAMING_SNAKE_CASE__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A_ )
SCREAMING_SNAKE_CASE__ = FlaxForcedEOSTokenLogitsProcessor(max_length=A_ , eos_token_id=A_ )
SCREAMING_SNAKE_CASE__ = 10
# no processor list
SCREAMING_SNAKE_CASE__ = temp_dist_warp(A_ , A_ , cur_len=A_ )
SCREAMING_SNAKE_CASE__ = top_k_warp(A_ , A_ , cur_len=A_ )
SCREAMING_SNAKE_CASE__ = top_p_warp(A_ , A_ , cur_len=A_ )
SCREAMING_SNAKE_CASE__ = min_dist_proc(A_ , A_ , cur_len=A_ )
SCREAMING_SNAKE_CASE__ = bos_dist_proc(A_ , A_ , cur_len=A_ )
SCREAMING_SNAKE_CASE__ = eos_dist_proc(A_ , A_ , cur_len=A_ )
# with processor list
SCREAMING_SNAKE_CASE__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
SCREAMING_SNAKE_CASE__ = processor(A_ , A_ , cur_len=A_ )
# scores should be equal
self.assertTrue(jnp.allclose(A_ , A_ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 10
SCREAMING_SNAKE_CASE__ = 15
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE__ = ids_tensor((batch_size, sequence_length) , A_ )
SCREAMING_SNAKE_CASE__ = input_ids.copy()
SCREAMING_SNAKE_CASE__ = self._get_uniform_logits(A_ , A_ )
SCREAMING_SNAKE_CASE__ = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE__ = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
SCREAMING_SNAKE_CASE__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=A_ )
SCREAMING_SNAKE_CASE__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A_ )
SCREAMING_SNAKE_CASE__ = FlaxForcedEOSTokenLogitsProcessor(max_length=A_ , eos_token_id=A_ )
SCREAMING_SNAKE_CASE__ = 10
# no processor list
def run_no_processor_list(A_ , A_ , A_ ):
SCREAMING_SNAKE_CASE__ = temp_dist_warp(A_ , A_ , cur_len=A_ )
SCREAMING_SNAKE_CASE__ = top_k_warp(A_ , A_ , cur_len=A_ )
SCREAMING_SNAKE_CASE__ = top_p_warp(A_ , A_ , cur_len=A_ )
SCREAMING_SNAKE_CASE__ = min_dist_proc(A_ , A_ , cur_len=A_ )
SCREAMING_SNAKE_CASE__ = bos_dist_proc(A_ , A_ , cur_len=A_ )
SCREAMING_SNAKE_CASE__ = eos_dist_proc(A_ , A_ , cur_len=A_ )
return scores
# with processor list
def run_processor_list(A_ , A_ , A_ ):
SCREAMING_SNAKE_CASE__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
SCREAMING_SNAKE_CASE__ = processor(A_ , A_ , cur_len=A_ )
return scores
SCREAMING_SNAKE_CASE__ = jax.jit(A_ )
SCREAMING_SNAKE_CASE__ = jax.jit(A_ )
SCREAMING_SNAKE_CASE__ = jitted_run_no_processor_list(A_ , A_ , A_ )
SCREAMING_SNAKE_CASE__ = jitted_run_processor_list(A_ , A_ , A_ )
# scores should be equal
self.assertTrue(jnp.allclose(A_ , A_ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
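
# A minimal standalone sketch (hedged illustration, outside the test class) of the
# warper chain the tests above exercise, built from the public `transformers` Flax
# API; it assumes `transformers[flax]` is installed. The dummy shapes are arbitrary.
import jax.numpy as jnp
from transformers import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
    FlaxTopPLogitsWarper,
)

dummy_input_ids = jnp.ones((2, 4), dtype="i4")  # (batch_size, cur_len)
dummy_scores = jnp.zeros((2, 10))  # uniform logits over a 10-token vocabulary
warper_chain = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(5), FlaxTopPLogitsWarper(0.9)]
)
warped_scores = warper_chain(dummy_input_ids, dummy_scores, cur_len=4)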
| 100 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_a = """bart"""
_a = True
@st.cache(allow_output_mutation=True )
def load_models( ) -> Dict:
"""simple docstring"""
if LOAD_DENSE_INDEX:
_UpperCamelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCamelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCamelCase = qar_model.eval()
else:
_UpperCamelCase , _UpperCamelCase = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCamelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCamelCase = sas_model.eval()
else:
_UpperCamelCase , _UpperCamelCase = make_qa_sas_model(
model_name='''t5-small''', from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''', device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
_UpperCamelCase = faiss.StandardGpuResources()
_UpperCamelCase = datasets.load_dataset(path='''wiki_snippets''', name='''wiki40b_en_100_0''' )['''train''']
_UpperCamelCase = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''', dtype='''float32''', mode='''r''', shape=(wikiaab_passages.num_rows, 1_28), )
_UpperCamelCase = faiss.IndexFlatIP(1_28 )
_UpperCamelCase = faiss.index_cpu_to_gpu(__snake_case, 1, __snake_case )
wikiaab_gpu_index_flat.add(__snake_case ) # TODO fix for larger GPU
else:
_UpperCamelCase , _UpperCamelCase = (None, None)
_UpperCamelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data( ) -> int:
"""simple docstring"""
_UpperCamelCase = datasets.load_dataset('''eli5''', name='''LFQA_reddit''' )
_UpperCamelCase = elia['''train_eli5''']
_UpperCamelCase = np.memmap(
'''eli5_questions_reps.dat''', dtype='''float32''', mode='''r''', shape=(elia_train.num_rows, 1_28) )
_UpperCamelCase = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(__snake_case )
return (elia_train, eli5_train_q_index)
_a , _a , _a = load_indexes()
_a , _a , _a , _a = load_models()
_a , _a = load_train_data()
def find_nearest_training( question, n_results=10 ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = embed_questions_for_retrieval([question], __snake_case, __snake_case )
_UpperCamelCase , _UpperCamelCase = eli5_train_q_index.search(__snake_case, __snake_case )
_UpperCamelCase = [elia_train[int(__snake_case )] for i in I[0]]
return nn_examples
def make_support( question, source="wiki40b", method="dense", n_results=10 ) -> List[str]:
"""simple docstring"""
if source == "none":
_UpperCamelCase , _UpperCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase , _UpperCamelCase = query_qa_dense_index(
__snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
else:
_UpperCamelCase , _UpperCamelCase = query_es_index(
__snake_case, __snake_case, index_name='''english_wiki40b_snippets_100w''', n_results=__snake_case, )
_UpperCamelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase = '''question: {} context: {}'''.format(__snake_case, __snake_case )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __snake_case : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __snake_case : None),
} )
def answer_question( question_doc, sas_model, sas_tokenizer, min_len=64, max_len=2_56, sampling=False, n_beams=2, top_p=0.95, temp=0.8 ) -> Dict:
"""simple docstring"""
with torch.no_grad():
_UpperCamelCase = qa_sas_generate(
__snake_case, __snake_case, __snake_case, num_answers=1, num_beams=__snake_case, min_len=__snake_case, max_len=__snake_case, do_sample=__snake_case, temp=__snake_case, top_p=__snake_case, top_k=__snake_case, max_input_length=10_24, device='''cuda:0''', )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_a = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_a = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_a = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_a = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_a = st.sidebar.checkbox("""Demo options""")
if demo_options:
_a = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_a = action_list.index(action_st)
_a = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_a = show_type == """Show full text of passages"""
else:
_a = 3
_a = True
_a = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_a = """
### Information retriever options
The **sparse** retriever uses Elasticsearch, while the **dense** retriever uses max-inner-product search between question and passage embeddings
trained on the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and the retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_a = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_a = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_a = """wiki40b"""
_a = """dense"""
_a = """beam"""
_a = 2
_a = 64
_a = 256
_a = None
_a = None
_a = st.sidebar.checkbox("""Generation options""")
if generate_options:
_a = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can generate answers with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_a = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_a = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_a = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_a = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_a = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_a = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_a = None
# start main text
_a = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_a = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_a = st.text_input("""Enter your question here:""", """""")
else:
_a = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_a , _a = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_a , _a = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_a = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_a = support_list[:10]
_a = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_a , _a = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_a , _a = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_a = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_a = res[1].strip()
if sec_titles == "":
_a = """[{}]({})""".format(res[0], wiki_url)
else:
_a = sec_titles.split(""" & """)
_a = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_a = find_nearest_training(question)
_a = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_a = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_a = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
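
# A hedged usage note (illustration only): as a Streamlit app, this file is meant
# to be launched with `streamlit run`, e.g. with a hypothetical filename:
#   streamlit run eli5_app.py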
| 19 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """simple docstring"""
    destination_vertex: int
    weight: int
class AdjacencyList:
    """simple docstring"""
    def __init__( self , size ):
        """simple docstring"""
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self , vertex ) -> Iterator[Edge]:
        """simple docstring"""
        return iter(self._graph[vertex] )
    @property
    def size( self ):
        """simple docstring"""
        return self._size
    def add_edge( self , from_vertex , to_vertex , weight ):
        """simple docstring"""
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).' )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )
    def get_shortest_path( self , start_vertex , finish_vertex ) -> int:
        """simple docstring"""
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.' )
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
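
# A hedged usage sketch of the graph above (illustration only): the 0-weight edge
# 1 -> 2 costs nothing, so vertex 2 is reached at distance 1 via 0 -> 1 -> 2.
_demo_graph = AdjacencyList(3)
_demo_graph.add_edge(0, 1, 1)
_demo_graph.add_edge(1, 2, 0)
assert _demo_graph.get_shortest_path(0, 2) == 1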
| 101 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
_a = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
_a = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively( hf_pointer, key, value, full_name, weight_type ) -> Tuple:
"""simple docstring"""
for attribute in key.split('''.''' ):
_UpperCamelCase = getattr(__snake_case, __snake_case )
if weight_type is not None:
_UpperCamelCase = getattr(__snake_case, __snake_case ).shape
else:
_UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_UpperCamelCase = value
elif weight_type == "weight_g":
_UpperCamelCase = value
elif weight_type == "weight_v":
_UpperCamelCase = value
elif weight_type == "bias":
_UpperCamelCase = value
else:
_UpperCamelCase = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights_wavaveca( fairseq_model, hf_model ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = fairseq_model.state_dict()
_UpperCamelCase = hf_model.feature_extractor
_UpperCamelCase = hf_model.adapter
for name, value in fairseq_dict.items():
_UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__snake_case, __snake_case, __snake_case, __snake_case, hf_model.config.feat_extract_norm == '''group''', )
_UpperCamelCase = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(__snake_case, __snake_case, __snake_case, __snake_case )
_UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_UpperCamelCase = True
if "*" in mapped_key:
_UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2]
_UpperCamelCase = mapped_key.replace('''*''', __snake_case )
if "weight_g" in name:
_UpperCamelCase = '''weight_g'''
elif "weight_v" in name:
_UpperCamelCase = '''weight_v'''
elif "bias" in name:
_UpperCamelCase = '''bias'''
elif "weight" in name:
_UpperCamelCase = '''weight'''
else:
_UpperCamelCase = None
set_recursively(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name, value, feature_extractor, unused_weights, use_group_norm ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = full_name.split('''conv_layers.''' )[-1]
_UpperCamelCase = name.split('''.''' )
_UpperCamelCase = int(items[0] )
_UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
)
_UpperCamelCase = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__snake_case )
def load_adapter( full_name, value, adapter, unused_weights ) -> Dict:
"""simple docstring"""
_UpperCamelCase = full_name.split('''adaptor.''' )[-1]
_UpperCamelCase = name.split('''.''' )
if items[1].isdigit():
_UpperCamelCase = int(items[1] )
else:
_UpperCamelCase = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
_UpperCamelCase = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(__snake_case, __snake_case ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
_UpperCamelCase = value
            logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(__snake_case )
def lowerCamelCase__ ( __snake_case ) -> str:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = emb.weight.shape
_UpperCamelCase = nn.Linear(__snake_case, __snake_case, bias=__snake_case )
_UpperCamelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim, ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = WavaVecaConfig.from_pretrained(
__snake_case, add_adapter=__snake_case, adapter_stride=__snake_case, adapter_kernel_size=__snake_case, use_auth_token=__snake_case, output_hidden_size=__snake_case, )
_UpperCamelCase = MBartConfig.from_pretrained(__snake_case )
# load model
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
}, )
_UpperCamelCase = model[0].eval()
# load feature extractor
_UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(__snake_case, use_auth_token=__snake_case )
# set weights for wav2vec2 encoder
_UpperCamelCase = WavaVecaModel(__snake_case )
recursively_load_weights_wavaveca(model.encoder, __snake_case )
# load decoder weights
_UpperCamelCase = MBartForCausalLM(__snake_case )
_UpperCamelCase , _UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=__snake_case )
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
_UpperCamelCase = SpeechEncoderDecoderModel(encoder=__snake_case, decoder=__snake_case )
_UpperCamelCase = False
_UpperCamelCase = MBartaaTokenizer(__snake_case )
tokenizer.save_pretrained(__snake_case )
_UpperCamelCase = hf_wavavec.config.to_dict()
_UpperCamelCase = tokenizer.pad_token_id
_UpperCamelCase = tokenizer.bos_token_id
_UpperCamelCase = tokenizer.eos_token_id
_UpperCamelCase = '''mbart50'''
_UpperCamelCase = '''wav2vec2'''
_UpperCamelCase = tokenizer.eos_token_id
_UpperCamelCase = 25_00_04
_UpperCamelCase = tokenizer.eos_token_id
_UpperCamelCase = SpeechEncoderDecoderConfig.from_dict(__snake_case )
hf_wavavec.save_pretrained(__snake_case )
feature_extractor.save_pretrained(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=25_0004, type=int, help="""`decoder_start_token_id` of model config""")
_a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
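# Example invocation (hedged; the script name and every path below are hypothetical
# placeholders, while the flags are the ones defined by the argparse block above):
#   python convert_wav2vec2_mbart_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./converted_model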
| 19 | 0 |
"""simple docstring"""
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res
def left_shift(data):
    return data[1:] + data[0]
def xor(a, b):
    res = ""
    for i in range(len(b)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]
def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)
    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 102 |
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """simple docstring"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
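# Example invocation (hedged; the script name and file paths are hypothetical
# placeholders):
#   python rouge_cli.py predictions.txt references.txt --save_path rouge_scores.json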
| 19 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
snake_case = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def snake_case ( lowerCAmelCase_ ) -> Tuple:
if isinstance(lowerCAmelCase_ , torch.Tensor ):
return image
elif isinstance(lowerCAmelCase_ , PIL.Image.Image ):
_snake_case = [image]
_snake_case = [trans(img.convert('''RGB''' ) ) for img in image]
_snake_case = torch.stack(lowerCAmelCase_ )
return image
class UpperCAmelCase ( DiffusionPipeline ):
def __init__( self : str , __lowerCamelCase : str , __lowerCamelCase : str ):
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
_snake_case = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Tuple ):
"""simple docstring"""
if strength < 0 or strength > 1:
raise ValueError(f"""The value of strength should in [0.0, 1.0] but is {strength}""" )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
# get the original timestep using init_timestep
_snake_case = min(int(num_inference_steps * strength ) , __lowerCamelCase )
_snake_case = max(num_inference_steps - init_timestep , 0 )
_snake_case = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=None ):
"""simple docstring"""
if not isinstance(__lowerCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCamelCase )}""" )
_snake_case = image.to(device=__lowerCamelCase , dtype=__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(__lowerCamelCase )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_snake_case = init_latents.shape
_snake_case = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase , dtype=__lowerCamelCase )
# get latents
print('''add noise to latents at timestep''' , __lowerCamelCase )
_snake_case = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_snake_case = init_latents
return latents
@torch.no_grad()
def __call__( self : int , __lowerCamelCase : Union[torch.FloatTensor, PIL.Image.Image] = None , __lowerCamelCase : float = 0.8 , __lowerCamelCase : int = 1 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : float = 0.0 , __lowerCamelCase : int = 5_0 , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , ):
"""simple docstring"""
self.check_inputs(__lowerCamelCase )
# 2. Preprocess image
_snake_case = preprocess(__lowerCamelCase )
# 3. set timesteps
self.scheduler.set_timesteps(__lowerCamelCase , device=self.device )
_snake_case , _snake_case = self.get_timesteps(__lowerCamelCase , __lowerCamelCase , self.device )
_snake_case = timesteps[:1].repeat(__lowerCamelCase )
# 4. Prepare latent variables
_snake_case = self.prepare_latents(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.unet.dtype , self.device , __lowerCamelCase )
_snake_case = latents
# 5. Denoising loop
for t in self.progress_bar(__lowerCamelCase ):
# 1. predict noise model_output
_snake_case = self.unet(__lowerCamelCase , __lowerCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_snake_case = self.scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , eta=__lowerCamelCase , use_clipped_model_output=__lowerCamelCase , generator=__lowerCamelCase , ).prev_sample
_snake_case = (image / 2 + 0.5).clamp(0 , 1 )
_snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_snake_case = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=__lowerCamelCase )
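
# A standalone sketch (hedged) of the img2img core implemented above: noise the
# init image up to an intermediate timestep, then run only the tail of the DDIM
# schedule. The UNet is a randomly initialized stand-in, not a trained checkpoint.
def _ddim_img2img_sketch():
    from diffusers import UNet2DModel

    unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
    sched = DDIMScheduler(num_train_timesteps=1000)
    sched.set_timesteps(50)
    strength = 0.8
    t_start = 50 - min(int(50 * strength), 50)  # mirrors get_timesteps above
    tail_timesteps = sched.timesteps[t_start:]
    init = torch.randn(1, 3, 32, 32)  # stand-in for a preprocessed image
    latents = sched.add_noise(init, torch.randn_like(init), tail_timesteps[:1])
    for t in tail_timesteps:
        with torch.no_grad():
            eps = unet(latents, t).sample
        latents = sched.step(eps, t, latents).prev_sample
    return latents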
| 103 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase( ProcessorMixin ):
lowercase__ = ['image_processor', 'tokenizer']
lowercase__ = 'ViTImageProcessor'
lowercase__ = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __a=None , __a=None , **__a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __a , )
_UpperCamelCase = kwargs.pop('''feature_extractor''')
_UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''')
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''')
super().__init__(__a , __a)
def __call__( self , __a=None , __a=None , __a=None , __a=None , **__a) -> Tuple:
'''simple docstring'''
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''')
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''')
if text is not None:
_UpperCamelCase = self.tokenizer(__a , return_tensors=__a , **__a)
if visual_prompt is not None:
_UpperCamelCase = self.image_processor(__a , return_tensors=__a , **__a)
if images is not None:
_UpperCamelCase = self.image_processor(__a , return_tensors=__a , **__a)
if visual_prompt is not None and images is not None:
_UpperCamelCase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
_UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
_UpperCamelCase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**__a) , tensor_type=__a)
def UpperCAmelCase ( self , *__a , **__a) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*__a , **__a)
def UpperCAmelCase ( self , *__a , **__a) -> List[str]:
'''simple docstring'''
return self.tokenizer.decode(*__a , **__a)
@property
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __a , )
return self.image_processor_class
@property
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __a , )
return self.image_processor
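
# A usage sketch (hedged): the class above mirrors transformers' public
# `CLIPSegProcessor`; the checkpoint id below is the public CLIPSeg one and the
# image is a blank stand-in. Wrapped in a function to avoid import-time downloads.
def _clipseg_processor_sketch():
    from PIL import Image
    from transformers import CLIPSegProcessor

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    dummy_image = Image.new("RGB", (224, 224))
    return processor(text=["a cat"], images=[dummy_image], return_tensors="pt")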
| 19 | 0 |
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(0 ) == 0 )
def test_and_gate() -> None:
    """simple docstring"""
assert and_gate(0, 0 ) == 0
assert and_gate(0, 1 ) == 0
assert and_gate(1, 0 ) == 0
assert and_gate(1, 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
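
# A composition sketch (hedged; `nand_gate` is a hypothetical helper, not part of
# the original file): NAND is simply the negation of the AND gate above.
def nand_gate(input_1: int, input_2: int) -> int:
    return int(not and_gate(input_1, input_2))


assert nand_gate(1, 1) == 0
assert nand_gate(0, 1) == 1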
| 104 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
def __init__( self , __a , __a=13 , __a=32 , __a=3 , __a=4 , __a=[10, 20, 30, 40] , __a=[2, 2, 3, 2] , __a=True , __a=True , __a=37 , __a="gelu" , __a=10 , __a=0.02 , __a=["stage2", "stage3", "stage4"] , __a=3 , __a=None , ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = num_channels
_UpperCamelCase = num_stages
_UpperCamelCase = hidden_sizes
_UpperCamelCase = depths
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = out_features
_UpperCamelCase = num_labels
_UpperCamelCase = scope
_UpperCamelCase = num_stages
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__a , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=__a , loss_ignore_index=2_55 , num_labels=self.num_labels , )
def UpperCAmelCase ( self , __a , __a , __a) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = UperNetForSemanticSegmentation(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size))
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__ = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = UperNetModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a)
@unittest.skip(reason='''UperNet does not use inputs_embeds''')
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''')
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not have a base model''')
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not have a base model''')
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
def check_hidden_states_output(__a , __a , __a):
_UpperCamelCase = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_UpperCamelCase = model(**self._prepare_for_class(__a , __a))
_UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(__a) , expected_num_stages + 1)
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = True
check_hidden_states_output(__a , __a , __a)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase = True
check_hidden_states_output(__a , __a , __a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = _config_zero_init(__a)
_UpperCamelCase = _config_zero_init(configs_no_init.backbone_config)
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(config=__a)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason='''UperNet does not have tied weights''')
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
pass
@slow
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = UperNetForSemanticSegmentation.from_pretrained(__a)
self.assertIsNotNone(__a)
def lowerCamelCase__ ( ) -> int:
"""simple docstring"""
_UpperCamelCase = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''', repo_type='''dataset''', filename='''ADE_val_00000001.jpg''' )
_UpperCamelCase = Image.open(__snake_case ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class _UpperCAmelCase( unittest.TestCase ):
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''')
_UpperCamelCase = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''').to(__a)
_UpperCamelCase = prepare_img()
_UpperCamelCase = processor(images=__a , return_tensors='''pt''').to(__a)
with torch.no_grad():
_UpperCamelCase = model(**__a)
_UpperCamelCase = torch.Size((1, model.config.num_labels, 5_12, 5_12))
self.assertEqual(outputs.logits.shape , __a)
_UpperCamelCase = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __a , atol=1e-4))
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''')
_UpperCamelCase = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''').to(__a)
_UpperCamelCase = prepare_img()
_UpperCamelCase = processor(images=__a , return_tensors='''pt''').to(__a)
with torch.no_grad():
_UpperCamelCase = model(**__a)
_UpperCamelCase = torch.Size((1, model.config.num_labels, 5_12, 5_12))
self.assertEqual(outputs.logits.shape , __a)
_UpperCamelCase = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __a , atol=1e-4))
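
# A standalone sketch (hedged) of the inference flow the slow tests above
# exercise; the checkpoint id is taken from the tests and the image is a blank
# stand-in. Wrapped in a function to avoid import-time downloads.
def _upernet_inference_sketch():
    from PIL import Image

    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    inputs = processor(images=Image.new("RGB", (512, 512)), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (1, num_labels, 512, 512)
    return logits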
| 19 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase_ ( CLIPImageProcessor ):
def __init__( self ,*snake_case__ ,**snake_case__ ):
warnings.warn(
'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use CLIPImageProcessor instead.' ,snake_case__ ,)
super().__init__(*snake_case__ ,**snake_case__ )
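
# Migration sketch (hedged): new code should construct the replacement class
# directly; the checkpoint id below is the standard public CLIP one.
#   from transformers import CLIPImageProcessor
#   image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")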
| 105 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase( SchedulerCommonTest ):
lowercase__ = (DDPMScheduler,)
def UpperCAmelCase ( self , **__a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__a)
return config
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=__a , beta_end=__a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.check_over_configs(thresholding=__a)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__a , prediction_type=__a , sample_max_value=__a , )
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=__a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.0_0979)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.02)) < 1e-5
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = len(__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter
_UpperCamelCase = torch.manual_seed(0)
for t in reversed(range(__a)):
# 1. predict noise residual
_UpperCamelCase = model(__a , __a)
# 2. predict previous mean of sample x_t-1
_UpperCamelCase = scheduler.step(__a , __a , __a , generator=__a).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_UpperCamelCase = pred_prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
assert abs(result_sum.item() - 258.9606) < 1e-2
assert abs(result_mean.item() - 0.3372) < 1e-3
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''')
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = len(__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter
_UpperCamelCase = torch.manual_seed(0)
for t in reversed(range(__a)):
# 1. predict noise residual
_UpperCamelCase = model(__a , __a)
# 2. predict previous mean of sample x_t-1
_UpperCamelCase = scheduler.step(__a , __a , __a , generator=__a).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_UpperCamelCase = pred_prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
assert abs(result_sum.item() - 202.0296) < 1e-2
assert abs(result_mean.item() - 0.2631) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__a)
_UpperCamelCase = scheduler.timesteps
for i, timestep in enumerate(__a):
if i == len(__a) - 1:
_UpperCamelCase = -1
else:
_UpperCamelCase = timesteps[i + 1]
_UpperCamelCase = scheduler.previous_timestep(__a)
_UpperCamelCase = prev_t.item()
self.assertEqual(__a , __a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = [1_00, 87, 50, 51, 0]
with self.assertRaises(__a , msg='''`custom_timesteps` must be in descending order.'''):
scheduler.set_timesteps(timesteps=__a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = [1_00, 87, 50, 1, 0]
_UpperCamelCase = len(__a)
with self.assertRaises(__a , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
scheduler.set_timesteps(num_inference_steps=__a , timesteps=__a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __a , msg=f'''`timesteps` must start before `self.config.num_train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=__a)
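# Hedged sketch of the sampling loop the tests above exercise one step at a time: the
# scheduler maps x_t to x_{t-1} via step(). The zero tensor stands in for a trained model's
# noise prediction, so only the control flow (not the output) is meaningful here.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=10_00, beta_schedule='linear')
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 8, 8)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample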
| 19 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def lowerCamelCase_ ( lowerCAmelCase__ : List[str] ) -> Tuple:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
A = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
A = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
A = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
A = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
A = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
A = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
A = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
A = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
A = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
A = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
A = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
A = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
A = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
A = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
A = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
A = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
A = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
A = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
A = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
A = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
A = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
A = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
A = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
A = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
def lowerCamelCase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] ) -> int:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
A = orig_state_dict.pop(lowerCAmelCase__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
A = key.split('.' )
A , A = int(key_split[2] ), int(key_split[4] )
A = config.vision_config.hidden_size
if "weight" in key:
A = val[:dim, :]
A = val[dim : dim * 2, :]
A = val[-dim:, :]
else:
A = val[:dim]
A = val[dim : dim * 2]
A = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
A = key.split('.' )
A = int(key_split[3] )
A = config.text_config.hidden_size
if "weight" in key:
A = val[:dim, :]
A = val[
dim : dim * 2, :
]
A = val[-dim:, :]
else:
A = val[:dim]
A = val[dim : dim * 2]
A = val[-dim:]
else:
A = rename_key(lowerCAmelCase__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
A = val.squeeze_()
else:
A = val
return orig_state_dict
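# Hedged illustration of the qkv split performed above: a fused (3 * dim, dim) in-projection
# weight is cut into equal query/key/value thirds along dim 0. Shapes here are illustrative.
import torch

dim = 4
qkv_weight = torch.randn(3 * dim, dim)
q_w, k_w, v_w = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)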
def lowerCamelCase_ ( ) -> List[Any]:
'''simple docstring'''
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int]="groupvit-gcc-yfcc" , lowerCAmelCase__ : str=False ) -> Any:
'''simple docstring'''
A = GroupViTConfig()
A = GroupViTModel(lowerCAmelCase__ ).eval()
A = torch.load(lowerCAmelCase__ , map_location='cpu' )['model']
A = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
A , A = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCAmelCase__ ) == 0)
# verify result
A = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
A = prepare_img()
A = processor(text=['a photo of a cat', 'a photo of a dog'] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='pt' )
with torch.no_grad():
A = model(**lowerCAmelCase__ )
if model_name == "groupvit-gcc-yfcc":
A = torch.tensor([[13.3523, 6.3629]] )
elif model_name == "groupvit-gcc-redcaps":
A = torch.tensor([[16.1873, 8.6230]] )
else:
raise ValueError(F'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1E-3 )
processor.save_pretrained(lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
print('Successfully saved processor and model to' , lowerCAmelCase__ )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(lowerCAmelCase__ , organization='nielsr' )
model.push_to_hub(lowerCAmelCase__ , organization='nielsr' )
if __name__ == "__main__":
__snake_case :List[Any] =argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
__snake_case :Any =parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 106 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def partition( number_to_partition ) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
    ret = set()
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def solution( number_unique_partitions = 50_00 ) -> int | None:
    """simple docstring"""
    for number_to_partition in range(1, number_unique_partitions ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
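# Worked note: by unique factorisation, distinct prime multisets always have distinct
# products, so len(partition(n)) equals the number of prime partitions of n. For example
# 10 = 2+2+2+2+2 = 2+2+3+3 = 2+3+5 = 3+7 = 5+5, giving five partitions:
assert len(partition(10)) == 5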
| 19 | 0 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
_UpperCAmelCase : Dict = logging.getLogger(__name__)
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : List[str], UpperCamelCase__ : str, UpperCamelCase__ : List[Any], UpperCamelCase__ : str=None, UpperCamelCase__ : Tuple=None ) -> List[str]:
_A = self.layer[current_layer](UpperCamelCase__, UpperCamelCase__, head_mask[current_layer] )
_A = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , _UpperCamelCase , )
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
def __init__( self : Tuple, UpperCamelCase__ : Tuple ) -> List[Any]:
super().__init__(UpperCamelCase__ )
_A = BertEncoderWithPabee(UpperCamelCase__ )
self.init_weights()
_A = 0
_A = 0
_A = 0
_A = 0
def __UpperCAmelCase ( self : Union[str, Any], UpperCamelCase__ : Optional[int] ) -> Dict:
_A = threshold
def __UpperCAmelCase ( self : int, UpperCamelCase__ : List[Any] ) -> List[str]:
_A = patience
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
_A = 0
_A = 0
def __UpperCAmelCase ( self : List[Any] ) -> Any:
_A = self.inference_layers_num / self.inference_instances_num
_A = (
f'*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='
f' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'
)
print(UpperCamelCase__ )
@add_start_docstrings_to_model_forward(UpperCamelCase__ )
def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : Optional[Any]=None, UpperCamelCase__ : Any=None, UpperCamelCase__ : List[str]=None, UpperCamelCase__ : str=None, UpperCamelCase__ : Dict=None, UpperCamelCase__ : Dict=None, UpperCamelCase__ : Dict=None, UpperCamelCase__ : Any=None, UpperCamelCase__ : Union[str, Any]=None, UpperCamelCase__ : List[Any]=None, UpperCamelCase__ : Any=False, ) -> str:
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_A = input_ids.size()
elif inputs_embeds is not None:
_A = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_A = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_A = torch.ones(UpperCamelCase__, device=UpperCamelCase__ )
if token_type_ids is None:
_A = torch.zeros(UpperCamelCase__, dtype=torch.long, device=UpperCamelCase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_A = self.get_extended_attention_mask(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_A , _A , _A = encoder_hidden_states.size()
_A = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_A = torch.ones(UpperCamelCase__, device=UpperCamelCase__ )
_A = self.invert_attention_mask(UpperCamelCase__ )
else:
_A = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_A = self.get_head_mask(UpperCamelCase__, self.config.num_hidden_layers )
_A = self.embeddings(
input_ids=UpperCamelCase__, position_ids=UpperCamelCase__, token_type_ids=UpperCamelCase__, inputs_embeds=UpperCamelCase__ )
_A = embedding_output
if self.training:
_A = []
for i in range(self.config.num_hidden_layers ):
_A = self.encoder.adaptive_forward(
UpperCamelCase__, current_layer=UpperCamelCase__, attention_mask=UpperCamelCase__, head_mask=UpperCamelCase__ )
_A = self.pooler(UpperCamelCase__ )
_A = output_layers[i](output_dropout(UpperCamelCase__ ) )
res.append(UpperCamelCase__ )
elif self.patience == 0: # Use all layers for inference
_A = self.encoder(
UpperCamelCase__, attention_mask=UpperCamelCase__, head_mask=UpperCamelCase__, encoder_hidden_states=UpperCamelCase__, encoder_attention_mask=UpperCamelCase__, )
_A = self.pooler(encoder_outputs[0] )
_A = [output_layers[self.config.num_hidden_layers - 1](UpperCamelCase__ )]
else:
_A = 0
_A = None
_A = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_A = self.encoder.adaptive_forward(
UpperCamelCase__, current_layer=UpperCamelCase__, attention_mask=UpperCamelCase__, head_mask=UpperCamelCase__ )
_A = self.pooler(UpperCamelCase__ )
_A = output_layers[i](UpperCamelCase__ )
if regression:
_A = logits.detach()
if patient_result is not None:
_A = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_A = 0
else:
_A = logits.detach().argmax(dim=1 )
if patient_result is not None:
_A = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCamelCase__ ) ):
patient_counter += 1
else:
_A = 0
_A = logits
if patient_counter == self.patience:
break
_A = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , _UpperCamelCase , )
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
def __init__( self : Dict, UpperCamelCase__ : List[str] ) -> Any:
super().__init__(UpperCamelCase__ )
_A = config.num_labels
_A = BertModelWithPabee(UpperCamelCase__ )
_A = nn.Dropout(config.hidden_dropout_prob )
_A = nn.ModuleList(
[nn.Linear(config.hidden_size, self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCamelCase__ )
def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : int=None, UpperCamelCase__ : str=None, UpperCamelCase__ : Any=None, UpperCamelCase__ : List[Any]=None, UpperCamelCase__ : Union[str, Any]=None, UpperCamelCase__ : Tuple=None, UpperCamelCase__ : List[str]=None, ) -> str:
_A = self.bert(
input_ids=UpperCamelCase__, attention_mask=UpperCamelCase__, token_type_ids=UpperCamelCase__, position_ids=UpperCamelCase__, head_mask=UpperCamelCase__, inputs_embeds=UpperCamelCase__, output_dropout=self.dropout, output_layers=self.classifiers, regression=self.num_labels == 1, )
_A = (logits[-1],)
if labels is not None:
_A = None
_A = 0
for ix, logits_item in enumerate(UpperCamelCase__ ):
if self.num_labels == 1:
# We are doing regression
_A = MSELoss()
_A = loss_fct(logits_item.view(-1 ), labels.view(-1 ) )
else:
_A = CrossEntropyLoss()
_A = loss_fct(logits_item.view(-1, self.num_labels ), labels.view(-1 ) )
if total_loss is None:
_A = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_A = (total_loss / total_weights,) + outputs
return outputs
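# Hedged standalone restatement of the patience rule implemented above: stop at the first
# layer whose argmax prediction agrees with the previous `patience` layers in a row. This is
# a simplification, not the module's API.
import torch

def patience_exit(layer_logits, patience=3):
    """layer_logits: list of (batch, num_labels) tensors, one per transformer layer."""
    streak, last = 0, None
    for depth, logits in enumerate(layer_logits, start=1):
        pred = logits.argmax(dim=1)
        streak = streak + 1 if last is not None and bool(torch.all(pred.eq(last))) else 0
        last = pred
        if streak == patience:
            return logits, depth  # early exit: deeper layers are never consulted
    return layer_logits[-1], len(layer_logits)

# Five identical layer outputs agree from layer 2 on, so exit happens at depth 4 (3 agreements).
assert patience_exit([torch.tensor([[0.1, 0.9]])] * 5, patience=3)[1] == 4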
| 107 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowerCamelCase__ ( ode_func, ya, xa, step_size, x_end ) -> np.array:
    """simple docstring"""
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y_pred = y[k] + step_size * ode_func(x, y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k] ) + ode_func(x + step_size, y_pred ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
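# Hedged usage example for the Heun integrator above (argument order as reconstructed:
# ode_func, ya, xa, step_size, x_end). For y' = y with y(0) = 1 the value at x = 1 should
# approach e = 2.71828...; with step 0.01 this second-order method lands near 2.7182.
print(lowerCamelCase__(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1])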
| 19 | 0 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_lowerCamelCase = LEDConfig
_lowerCamelCase = {}
_lowerCamelCase = '''gelu'''
def __init__( self : Dict , lowerCamelCase : str , lowerCamelCase : Optional[int]=13 , lowerCamelCase : Optional[int]=7 , lowerCamelCase : Dict=True , lowerCamelCase : List[Any]=False , lowerCamelCase : Any=99 , lowerCamelCase : Union[str, Any]=32 , lowerCamelCase : List[str]=2 , lowerCamelCase : int=4 , lowerCamelCase : Any=37 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : Dict=20 , lowerCamelCase : List[Any]=2 , lowerCamelCase : str=1 , lowerCamelCase : Tuple=0 , lowerCamelCase : Optional[Any]=4 , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCAmelCase = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCAmelCase = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def lowerCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCAmelCase = prepare_led_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = tf.concat(
[tf.zeros_like(lowerCamelCase )[:, :-1], tf.ones_like(lowerCamelCase )[:, -1:]] , axis=-1 , )
_UpperCAmelCase = global_attention_mask
return config, inputs_dict
def lowerCamelCase ( self : Dict , lowerCamelCase : Dict , lowerCamelCase : Dict ) -> Any:
"""simple docstring"""
_UpperCAmelCase = TFLEDModel(config=lowerCamelCase ).get_decoder()
_UpperCAmelCase = inputs_dict["""input_ids"""]
_UpperCAmelCase = input_ids[:1, :]
_UpperCAmelCase = inputs_dict["""attention_mask"""][:1, :]
_UpperCAmelCase = 1
# first forward pass
_UpperCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , use_cache=lowerCamelCase )
_UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase )[0]
_UpperCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , past_key_values=lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
_UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCamelCase , lowerCamelCase , rtol=1E-3 )
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , ) -> List[Any]:
if attention_mask is None:
_UpperCAmelCase = tf.cast(tf.math.not_equal(__snake_case , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_UpperCAmelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
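# Hedged aside on the `encoder_seq_length` computed in the tester above: LED pads inputs to
# a multiple of the attention window. The same arithmetic as a standalone helper:
def _padded_length(seq_length: int, attention_window: int) -> int:
    return seq_length + (attention_window - seq_length % attention_window) % attention_window

assert _padded_length(7, 4) == 8 and _padded_length(8, 4) == 8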
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowerCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowerCamelCase = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def lowerCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = TFLEDModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=lowerCamelCase )
def lowerCamelCase ( self : Dict ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase )
def lowerCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = tf.zeros_like(inputs_dict["""attention_mask"""] )
_UpperCAmelCase = 2
_UpperCAmelCase = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
_UpperCAmelCase = True
_UpperCAmelCase = self.model_tester.seq_length
_UpperCAmelCase = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowerCamelCase : Optional[Any] ):
_UpperCAmelCase = outputs.decoder_attentions
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowerCamelCase : Any ):
_UpperCAmelCase = [t.numpy() for t in outputs.encoder_attentions]
_UpperCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = model_class(lowerCamelCase )
_UpperCAmelCase = model(self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
_UpperCAmelCase = len(lowerCamelCase )
self.assertEqual(config.output_hidden_states , lowerCamelCase )
check_encoder_attentions_output(lowerCamelCase )
if self.is_encoder_decoder:
_UpperCAmelCase = model_class(lowerCamelCase )
_UpperCAmelCase = model(self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase )
check_decoder_attentions_output(lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(lowerCamelCase )
_UpperCAmelCase = model(self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase )
check_encoder_attentions_output(lowerCamelCase )
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(lowerCamelCase )
_UpperCAmelCase = model(self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , lowerCamelCase )
check_encoder_attentions_output(lowerCamelCase )
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
def lowerCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
def lowerCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
# TODO: Head-masking not yet implement
pass
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> Any:
return tf.constant(__snake_case , dtype=tf.intaa )
__a: float = 1E-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led
# change to intended input here
_UpperCAmelCase = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCAmelCase = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCAmelCase = prepare_led_inputs_dict(model.config , lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = model(**lowerCamelCase )[0]
_UpperCAmelCase = (1, 1024, 768)
self.assertEqual(output.shape , lowerCamelCase )
# change to expected output here
_UpperCAmelCase = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase , atol=1E-3 )
def lowerCamelCase ( self : Any ) -> str:
"""simple docstring"""
_UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" )
# change to intended input here
_UpperCAmelCase = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCAmelCase = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCAmelCase = prepare_led_inputs_dict(model.config , lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = model(**lowerCamelCase )[0]
_UpperCAmelCase = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , lowerCamelCase )
# change to expected output here
_UpperCAmelCase = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
        tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase , atol=1E-3 , rtol=1E-3 )
| 108 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_a = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_a = parser.parse_args()
if args.model_type == "bert":
_a = BertForMaskedLM.from_pretrained(args.model_name)
_a = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_a = model.state_dict()
_a = {}
for w in ["word_embeddings", "position_embeddings"]:
_a = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_a = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
_a = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_a = state_dict["""cls.predictions.decoder.weight"""]
_a = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_a = state_dict[F"""cls.predictions.transform.dense.{w}"""]
_a = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
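# Hedged note on the extraction above: teacher layers [0, 2, 4, 7, 9, 11] land at student
# positions 0..5 (std_idx), i.e. the 6-layer student keeps roughly every other BERT layer.
teacher_to_student = {t: s for s, t in enumerate([0, 2, 4, 7, 9, 11])}
assert teacher_to_student == {0: 0, 2: 1, 4: 2, 7: 3, 9: 4, 11: 5}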
| 19 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Tuple = MobileBertTokenizer
__UpperCamelCase : List[str] = MobileBertTokenizerFast
__UpperCamelCase : Dict = True
__UpperCamelCase : List[str] = True
__UpperCamelCase : Any = filter_non_english
__UpperCamelCase : str = 'google/mobilebert-uncased'
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
super().setUp()
__SCREAMING_SNAKE_CASE = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
__SCREAMING_SNAKE_CASE = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
__SCREAMING_SNAKE_CASE = """unwanted, running"""
return input_text, output_text
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCamelCase ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) ,[9, 6, 7, 12, 10, 11] )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase )
__SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase ,add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase )
__SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
# With lower casing
__SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase )
__SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase ,add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase )
__SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) ,["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase ,strip_accents=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""h\u00E9llo"""] )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase ,strip_accents=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase ,strip_accents=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase ,strip_accents=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase ,never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = i
__SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=lowerCamelCase ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) ,["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) ,["""[UNK]""", """runn""", """##ing"""] )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCamelCase ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCamelCase ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
__SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCamelCase ,lowerCamelCase )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
__SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(
lowerCamelCase ,return_attention_mask=lowerCamelCase ,return_token_type_ids=lowerCamelCase ,return_offsets_mapping=lowerCamelCase ,add_special_tokens=lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(lowerCamelCase ,"""do_lower_case""" ) else False
__SCREAMING_SNAKE_CASE = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens["""offset_mapping"""] )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["""的""", """人""", """有"""]
__SCREAMING_SNAKE_CASE = """""".join(lowerCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.encode(lowerCamelCase ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_r.encode(lowerCamelCase ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(lowerCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_r.encode(lowerCamelCase ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.encode(lowerCamelCase ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(lowerCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
__SCREAMING_SNAKE_CASE = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCamelCase )
]
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
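# Hedged standalone sketch of the greedy longest-match-first WordPiece pass the tests above
# exercise ("unwanted" -> un ##want ##ed); a simplification of the real tokenizer, not its API.
def _wordpiece(word, vocab, unk='[UNK]'):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = ('##' if start > 0 else '') + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:
            return [unk]  # no prefix of the remainder is in the vocab
        start = end
    return pieces

assert _wordpiece('unwanted', {'un', '##want', '##ed'}) == ['un', '##want', '##ed']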
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
| 109 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_a = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class _UpperCAmelCase:
lowercase__ = PegasusConfig
lowercase__ = {}
lowercase__ = 'gelu'
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ) -> int:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = eos_token_id
_UpperCamelCase = pad_token_id
_UpperCamelCase = bos_token_id
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size)
_UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1)
_UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCamelCase = prepare_pegasus_inputs_dict(__a , __a , __a)
return config, inputs_dict
def UpperCAmelCase ( self , __a , __a , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(__a)
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''])
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __a , __a)
_UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''')
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''')
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , __a , decoder_attention_mask=__a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__a , )
_UpperCamelCase = model.decode(__a , __a)
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''')
def UpperCAmelCase ( self , __a , __a , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(__a)
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''])
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __a , __a)
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''')
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , __a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__a , decoder_position_ids=__a , )
_UpperCamelCase = model.decode(__a , __a , decoder_attention_mask=__a)
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''')
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=None, __snake_case=None, ) -> Union[str, Any]:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase = np.not_equal(__snake_case, config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_UpperCamelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape, dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ).astype(np.inta ),
], axis=-1, )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
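# Hedged aside: the default attention mask built above is simply "token != pad_token_id"
# cast to an integer mask; the pad id 0 below is illustrative.
import numpy as np

assert np.not_equal(np.array([[5, 0, 7]]), 0).astype(np.int8).tolist() == [[1, 0, 1]]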
@require_flax
class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ):
lowercase__ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowercase__ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowercase__ = True
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = FlaxPegasusModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__a , __a , __a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__a , __a , __a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_UpperCamelCase = self._prepare_for_class(__a , __a)
_UpperCamelCase = model_class(__a)
@jax.jit
def encode_jitted(__a , __a=None , **__a):
return model.encode(input_ids=__a , attention_mask=__a)
with self.subTest('''JIT Enabled'''):
_UpperCamelCase = encode_jitted(**__a).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
_UpperCamelCase = encode_jitted(**__a).to_tuple()
self.assertEqual(len(__a) , len(__a))
for jitted_output, output in zip(__a , __a):
self.assertEqual(jitted_output.shape , output.shape)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''])
_UpperCamelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(__a , __a , __a):
return model.decode(
decoder_input_ids=__a , decoder_attention_mask=__a , encoder_outputs=__a , )
with self.subTest('''JIT Enabled'''):
_UpperCamelCase = decode_jitted(**__a).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
_UpperCamelCase = decode_jitted(**__a).to_tuple()
self.assertEqual(len(__a) , len(__a))
for jitted_output, output in zip(__a , __a):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=__a)
_UpperCamelCase = np.ones((1, 1))
_UpperCamelCase = model(__a)
self.assertIsNotNone(__a)
@slow
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''')
_UpperCamelCase = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''')
_UpperCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
_UpperCamelCase = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
_UpperCamelCase = tokenizer(__a , return_tensors='''np''' , truncation=__a , max_length=5_12 , padding=__a)
_UpperCamelCase = model.generate(**__a , num_beams=2).sequences
_UpperCamelCase = tokenizer.batch_decode(__a , skip_special_tokens=__a)
assert tgt_text == decoded
| 19 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class a ( lowercase ):
UpperCamelCase : Tuple = """levit"""
def __init__( self , UpperCamelCase_=224 , UpperCamelCase_=3 , UpperCamelCase_=3 , UpperCamelCase_=2 , UpperCamelCase_=1 , UpperCamelCase_=16 , UpperCamelCase_=[128, 256, 384] , UpperCamelCase_=[4, 8, 12] , UpperCamelCase_=[4, 4, 4] , UpperCamelCase_=[16, 16, 16] , UpperCamelCase_=0 , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=0.02 , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = image_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Any = kernel_size
UpperCAmelCase__ : Optional[Any] = stride
UpperCAmelCase__ : Optional[Any] = padding
UpperCAmelCase__ : Union[str, Any] = hidden_sizes
UpperCAmelCase__ : Dict = num_attention_heads
UpperCAmelCase__ : Union[str, Any] = depths
UpperCAmelCase__ : List[str] = key_dim
UpperCAmelCase__ : List[str] = drop_path_rate
UpperCAmelCase__ : Any = patch_size
UpperCAmelCase__ : int = attention_ratio
UpperCAmelCase__ : Union[str, Any] = mlp_ratio
UpperCAmelCase__ : Optional[Any] = initializer_range
UpperCAmelCase__ : List[str] = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
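        # With the defaults above (key_dim[0] = 16, hidden_sizes = [128, 256, 384])
        # this resolves to [['Subsample', 16, 8, 4, 2, 2], ['Subsample', 16, 16, 4, 2, 2]],
        # since 128 // 16 = 8 and 256 // 16 = 16.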
class a ( lowercase ):
UpperCamelCase : Tuple = version.parse("""1.11""" )
@property
def __snake_case ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __snake_case ( self ):
return 1E-4
| 110 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , __a=0 , ) -> Any:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
_UpperCamelCase = projection_dim
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
_UpperCamelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict())
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = TFDPRContextEncoder(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = TFDPRQuestionEncoder(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = TFDPRReader(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids}
        return config, inputs_dict
@require_tf
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowercase__ = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = TFDPRModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__a)
@slow
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRContextEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRContextEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRQuestionEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRReader.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class _UpperCAmelCase( unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''')
_UpperCamelCase = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]]) # [CLS] hello, is my dog cute? [SEP]
_UpperCamelCase = model(__a)[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_UpperCamelCase = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
])
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4))
| 19 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__lowerCAmelCase = trt.Logger(trt.Logger.WARNING)
__lowerCAmelCase = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__lowerCAmelCase = logging.getLogger(__name__)
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=3_8_4,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=1_2_8,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=2_0,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=3_0,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=4_2, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
__lowerCAmelCase = parser.parse_args()
if args.tokenizer_name:
__lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
__lowerCAmelCase = args.per_device_eval_batch_size
__lowerCAmelCase = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__lowerCAmelCase = True
__lowerCAmelCase = """temp_engine/bert-fp32.engine"""
if args.fpaa:
__lowerCAmelCase = """temp_engine/bert-fp16.engine"""
if args.inta:
__lowerCAmelCase = """temp_engine/bert-int8.engine"""
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
__lowerCAmelCase = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__lowerCAmelCase = [network.get_input(i) for i in range(network.num_inputs)]
__lowerCAmelCase = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__lowerCAmelCase = 1 << 5_0
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__lowerCAmelCase = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__lowerCAmelCase = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def UpperCAmelCase_ (__a : List[Any] , __a : str , __a : List[str] , __a : Optional[Any] , __a : Any , __a : List[str] , __a : int , __a : List[Any] ):
"""simple docstring"""
_a : Union[str, Any] = np.asarray(inputs['input_ids'] , dtype=np.intaa )
_a : Optional[Any] = np.asarray(inputs['attention_mask'] , dtype=np.intaa )
_a : Any = np.asarray(inputs['token_type_ids'] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , __snake_case )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , __snake_case )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , __snake_case )
# start time
_a : Union[str, Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(__snake_case ) for d_inp in d_inputs] + [int(__snake_case ), int(__snake_case )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(__snake_case , __snake_case , __snake_case )
cuda.memcpy_dtoh_async(__snake_case , __snake_case , __snake_case )
# Synchronize the stream and take time
stream.synchronize()
# end time
_a : Tuple = time.time()
_a : Union[str, Any] = end_time - start_time
_a : str = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__lowerCAmelCase = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowerCAmelCase = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__lowerCAmelCase = raw_datasets["""validation"""].column_names
__lowerCAmelCase = """question""" if """question""" in column_names else column_names[0]
__lowerCAmelCase = """context""" if """context""" in column_names else column_names[1]
__lowerCAmelCase = """answers""" if """answers""" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__lowerCAmelCase = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
        f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the '''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
__lowerCAmelCase = min(args.max_seq_length, tokenizer.model_max_length)
def UpperCAmelCase_ (__a : Dict ):
"""simple docstring"""
_a : Tuple = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
_a : Optional[Any] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=__snake_case , stride=args.doc_stride , return_overflowing_tokens=__snake_case , return_offsets_mapping=__snake_case , padding='max_length' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
_a : Optional[Any] = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
_a : List[Any] = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
_a : str = tokenized_examples.sequence_ids(__snake_case )
_a : Optional[Any] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
_a : int = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
_a : Optional[Any] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
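# Illustrative shape of what the map above keeps (values assumed): for one feature
# of a question+context pair with pad_on_right, question and special tokens are
# nulled while context tokens keep their character spans, e.g.
#   tokenized_examples['offset_mapping'][i] == [None, None, (0, 4), (5, 9), ...]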
__lowerCAmelCase = raw_datasets["""validation"""]
# Validation Feature Creation
__lowerCAmelCase = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="""Running tokenizer on validation dataset""",
)
__lowerCAmelCase = default_data_collator
__lowerCAmelCase = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""])
__lowerCAmelCase = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def UpperCAmelCase_ (__a : Any , __a : str , __a : Optional[int] , __a : Union[str, Any]="eval" ):
"""simple docstring"""
_a : List[str] = postprocess_qa_predictions(
examples=__snake_case , features=__snake_case , predictions=__snake_case , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=__snake_case , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
_a : Optional[Any] = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
_a : Union[str, Any] = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
_a : Any = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=__snake_case , label_ids=__snake_case )
__lowerCAmelCase = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def UpperCAmelCase_ (__a : List[Any] ):
"""simple docstring"""
return trt.volume(engine.get_binding_shape(__snake_case ) ) * engine.get_binding_dtype(__snake_case ).itemsize
# Allocate device memory for inputs and outputs.
__lowerCAmelCase = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__lowerCAmelCase = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__lowerCAmelCase = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__lowerCAmelCase = cuda.mem_alloc(h_outputa.nbytes)
__lowerCAmelCase = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__lowerCAmelCase = cuda.Stream()
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
__lowerCAmelCase = 0.0
__lowerCAmelCase = 0
__lowerCAmelCase = timeit.default_timer()
__lowerCAmelCase = None
for step, batch in enumerate(eval_dataloader):
__lowerCAmelCase , __lowerCAmelCase = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__lowerCAmelCase , __lowerCAmelCase = outputs
__lowerCAmelCase = torch.tensor(start_logits)
__lowerCAmelCase = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__lowerCAmelCase = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0)
__lowerCAmelCase = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0)
__lowerCAmelCase = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__lowerCAmelCase = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0)
if all_preds is not None:
__lowerCAmelCase = nested_truncate(all_preds, len(eval_dataset))
__lowerCAmelCase = timeit.default_timer() - start_time
logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1_0_0_0 / niter))
logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1_0_0_0))
logger.info("""Total Number of Inference = %d""", niter)
__lowerCAmelCase = post_processing_function(eval_examples, eval_dataset, all_preds)
__lowerCAmelCase = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
| 229 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
if (
        (cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4DBF)  # CJK Unified Ideographs Extension A
        or (cp >= 0x2_0000 and cp <= 0x2_A6DF)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2_A700 and cp <= 0x2_B73F)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2_B740 and cp <= 0x2_B81F)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2_B820 and cp <= 0x2_CEAF)  # CJK Unified Ideographs Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0x2_F800 and cp <= 0x2_FA1F)  # CJK Compatibility Ideographs Supplement
    ):
return True
return False
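# Illustrative (hypothetical) checks of the ranges above:
#   _is_chinese_char(ord('中'))  # True: U+4E2D lies in the CJK Unified Ideographs block
#   _is_chinese_char(ord('a'))   # False: ASCII letters fall outside every listed range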
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
for char in word:
_UpperCamelCase = ord(__snake_case )
if not _is_chinese_char(__snake_case ):
return 0
return 1
def lowerCamelCase__ ( __snake_case ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = set()
    for token in __snake_case:
        _UpperCamelCase = len(token ) > 1 and is_chinese(token )
if chinese_word:
word_set.add(__snake_case )
_UpperCamelCase = list(__snake_case )
return word_list
def lowerCamelCase__ ( __snake_case, __snake_case ) -> int:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
    _UpperCamelCase = max([len(w ) for w in chinese_word_set] )
_UpperCamelCase = bert_tokens
_UpperCamelCase , _UpperCamelCase = 0, len(__snake_case )
while start < end:
_UpperCamelCase = True
if is_chinese(bert_word[start] ):
_UpperCamelCase = min(end - start, __snake_case )
for i in range(__snake_case, 1, -1 ):
_UpperCamelCase = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
_UpperCamelCase = '''##''' + bert_word[j]
_UpperCamelCase = start + i
_UpperCamelCase = False
break
if single_word:
start += 1
return bert_word
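# Illustrative trace (hypothetical inputs, mirroring the call in prepare_ref):
# with chinese_word_set = {'你好'}, the BERT pieces ['你', '好'] are rewritten so
# every piece after the first of a matched whole word gets the '##' prefix:
#   add_sub_symbol(['你', '好'], {'你好'}) -> ['你', '##好']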
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = []
for i in range(0, len(__snake_case ), 1_00 ):
_UpperCamelCase = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws'''] ).cws
_UpperCamelCase = [get_chinese_word(__snake_case ) for r in res]
ltp_res.extend(__snake_case )
assert len(__snake_case ) == len(__snake_case )
_UpperCamelCase = []
for i in range(0, len(__snake_case ), 1_00 ):
_UpperCamelCase = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=__snake_case, truncation=__snake_case, max_length=5_12 )
bert_res.extend(res['''input_ids'''] )
assert len(__snake_case ) == len(__snake_case )
_UpperCamelCase = []
for input_ids, chinese_word in zip(__snake_case, __snake_case ):
_UpperCamelCase = []
for id in input_ids:
_UpperCamelCase = bert_tokenizer._convert_id_to_token(__snake_case )
input_tokens.append(__snake_case )
_UpperCamelCase = add_sub_symbol(__snake_case, __snake_case )
_UpperCamelCase = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(__snake_case ):
if token[:2] == "##":
_UpperCamelCase = token[2:]
# save chinese tokens' pos
if len(__snake_case ) == 1 and _is_chinese_char(ord(__snake_case ) ):
ref_id.append(__snake_case )
ref_ids.append(__snake_case )
assert len(__snake_case ) == len(__snake_case )
return ref_ids
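# Illustrative output (values assumed): one list of token positions per input
# line, marking single-character '##' pieces that continue a Chinese whole word,
# e.g. prepare_ref(lines, ltp_tokenizer, bert_tokenizer) -> [[2, 3], [], [5]]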
def lowerCamelCase__ ( __snake_case ) -> Optional[int]:
"""simple docstring"""
with open(args.file_name, '''r''', encoding='''utf-8''' ) as f:
_UpperCamelCase = f.readlines()
_UpperCamelCase = [line.strip() for line in data if len(__snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    _UpperCamelCase = LTP(args.ltp )  # faster on a GPU device
_UpperCamelCase = BertTokenizer.from_pretrained(args.bert )
_UpperCamelCase = prepare_ref(__snake_case, __snake_case, __snake_case )
with open(args.save_path, '''w''', encoding='''utf-8''' ) as f:
_UpperCamelCase = [json.dumps(__snake_case ) + '''\n''' for ref in ref_ids]
f.writelines(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
_a = parser.parse_args()
main(args)
| 19 | 0 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def __lowerCamelCase (UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__snake_case , **__snake_case )
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_config(__snake_case )
model.save_pretrained(__snake_case )
AutoTokenizer.from_pretrained(__snake_case ).save_pretrained(__snake_case )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
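    # Hypothetical invocation (script name and arguments are assumptions; the two
    # positionals are a config identifier and a save directory):
    #   python save_random_model.py t5-small ./t5_random_init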
| 403 |
"""simple docstring"""
import heapq
def lowerCamelCase__ ( __snake_case ) -> set[int]:
"""simple docstring"""
_UpperCamelCase = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(__snake_case, [-1 * len(__snake_case ), (key, value)] )
# chosen_vertices = set of chosen vertices
_UpperCamelCase = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_UpperCamelCase = heapq.heappop(__snake_case )[1][0]
chosen_vertices.add(__snake_case )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
_UpperCamelCase = elem[1][1].index(__snake_case )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(__snake_case )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_a = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
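    # For the demo graph above, the greedy strategy picks the highest-degree
    # vertex first (node 2, degree 3), then nodes 0, 1 and 4, so the expected
    # printed cover is {0, 1, 2, 4}.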
| 19 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case : Any = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class A ( unittest.TestCase ):
lowercase_ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowercase_ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowercase_ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowercase_ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] ) -> str:
"""simple docstring"""
_a = ZeroShotClassificationPipeline(
model=__a , tokenizer=__a , candidate_labels=['''polics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] ) -> Optional[int]:
"""simple docstring"""
_a = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(__a , {'''sequence''': ANY(__a ), '''labels''': [ANY(__a )], '''scores''': [ANY(__a )]} )
# No kwarg
_a = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(__a , {'''sequence''': ANY(__a ), '''labels''': [ANY(__a )], '''scores''': [ANY(__a )]} )
_a = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(__a , {'''sequence''': ANY(__a ), '''labels''': [ANY(__a )], '''scores''': [ANY(__a )]} )
_a = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
__a , {'''sequence''': ANY(__a ), '''labels''': [ANY(__a ), ANY(__a )], '''scores''': [ANY(__a ), ANY(__a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
_a = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
__a , {'''sequence''': ANY(__a ), '''labels''': [ANY(__a ), ANY(__a )], '''scores''': [ANY(__a ), ANY(__a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
_a = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(__a , {'''sequence''': ANY(__a ), '''labels''': [ANY(__a )], '''scores''': [ANY(__a )]} )
# https://github.com/huggingface/transformers/issues/13846
_a = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
__a , [
{'''sequence''': ANY(__a ), '''labels''': [ANY(__a ), ANY(__a )], '''scores''': [ANY(__a ), ANY(__a )]}
for i in range(1 )
] , )
_a = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
__a , [
{'''sequence''': ANY(__a ), '''labels''': [ANY(__a ), ANY(__a )], '''scores''': [ANY(__a ), ANY(__a )]}
for i in range(2 )
] , )
with self.assertRaises(__a ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(__a ):
classifier(__a , candidate_labels='''politics''' )
with self.assertRaises(__a ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(__a ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=__a )
with self.assertRaises(__a ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(__a ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=__a , )
self.run_entailment_id(__a )
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : int ) -> List[Any]:
"""simple docstring"""
_a = zero_shot_classifier.model.config
_a = config.labelaid
_a = zero_shot_classifier.entailment_id
_a = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
_a = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
_a = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
_a = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
_a = original_labelaid
self.assertEqual(__a , zero_shot_classifier.entailment_id )
@require_torch
def __lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 1_00 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def __lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
_a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
_a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(__a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def __lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
_a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
_a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(__a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def __lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
_a = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
_a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(__a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
_a = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=__a , )
self.assertEqual(
nested_simplify(__a ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_a = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
_a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(__a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
_a = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=__a , )
self.assertEqual(
nested_simplify(__a ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
| 22 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCamelCase__ ( ) -> List[str]:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
_UpperCamelCase = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching, '''os.path.join''', __snake_case ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCamelCase__ ( ) -> List[str]:
"""simple docstring"""
assert _test_patching.open is open
_UpperCamelCase = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching, '''open''', __snake_case ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching, '''pandas.read_csv''', __snake_case ):
pass
def lowerCamelCase__ ( ) -> Dict:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching, '''len''', __snake_case ) is None
with patch_submodule(_test_patching, '''len''', __snake_case ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_start_and_stop_mock__'''
_UpperCamelCase = patch_submodule(_test_patching, '''open''', __snake_case )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> Optional[int]:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
_UpperCamelCase = '''__test_patch_submodule_successive_join__'''
_UpperCamelCase = '''__test_patch_submodule_successive_dirname__'''
_UpperCamelCase = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching, '''os.path.join''', __snake_case ):
with patch_submodule(_test_patching, '''os.rename''', __snake_case ):
with patch_submodule(_test_patching, '''os.path.dirname''', __snake_case ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching, '''os.rename''', __snake_case ):
with patch_submodule(_test_patching, '''os.path.join''', __snake_case ):
with patch_submodule(_test_patching, '''os.path.dirname''', __snake_case ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCamelCase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching, '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''', __snake_case ):
pass
with patch_submodule(_test_patching, '''os.__attribute_that_doesn_exist__''', __snake_case ):
pass
| 19 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , visual_prompt=None , images=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images." )
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
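# A minimal usage sketch for the processor above. The checkpoint name and the
# `pil_image` variable are illustrative assumptions; the class pairs a ViT image
# processor with a CLIP tokenizer and emits CLIPSeg-style conditional pixel values.
# from transformers import CLIPTokenizerFast, ViTImageProcessor
# processor = _lowerCAmelCase(
#     image_processor=ViTImageProcessor(),
#     tokenizer=CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32"),
# )
# inputs = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")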
| 649 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name ) -> str:
    """simple docstring"""
    to_find = original_name.split('''.''' )[0]
    key_list = key.split('''.''' )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''', F'''block.{new_block_num}.{layer_num}.{new_name}''' )
    return key
def rename_keys(state_dict ) -> OrderedDict:
    """simple docstring"""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('''network''' ):
            key = key.replace('''network''', '''poolformer.encoder''' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('''bias''' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('''proj''' )]
            key = key.replace(to_replace, F'''patch_embeddings.{total_embed_found}.''' )
            key = key.replace('''proj''', '''projection''' )
            if key.endswith('''bias''' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = '''poolformer.encoder.''' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''mlp.fc1''', '''output.conv1''' )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''mlp.fc2''', '''output.conv2''' )
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''norm1''', '''before_norm''' )
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''norm2''', '''after_norm''' )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''layer_scale_1''', '''layer_scale_1''' )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''layer_scale_2''', '''layer_scale_2''' )
        if "head" in key:
            key = key.replace('''head''', '''classifier''' )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img() -> Optional[Any]:
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url, stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path ) -> int:
    """simple docstring"""
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = '''huggingface/label-files'''
    size = model_name[-3:]
    num_labels = 10_00
    filename = '''imagenet-1k-id2label.json'''
    expected_shape = (1, 10_00)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 1_92, 3_84, 7_68]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 1_92, 3_84, 7_68]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(F'''Size {size} not supported''' )
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='''pt''' ).pixel_values
    logger.info(F'''Converting model {model_name}...''' )
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('''cpu''' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img(), return_tensors='''pt''' ).pixel_values
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869] )
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898] )
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668] )
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423] )
    else:
        raise ValueError(F'''Size {size} not supported''' )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2 )
    # finally, save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
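# Example invocation of the conversion script above (the checkpoint file name is
# an assumption; only --model_name has a default):
# python convert_poolformer_checkpoint.py --model_name poolformer_s12 \
#     --checkpoint_path poolformer_s12.pth.tar --pytorch_dump_folder_path ./poolformer_s12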
| 19 | 0 |
'''simple docstring'''
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 614 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = (DPMSolverSDEScheduler,)
lowercase__ = 10
    def get_scheduler_config( self , **kwargs):
        '''simple docstring'''
        config = {
            '''num_train_timesteps''': 11_00,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }
        config.update(**kwargs)
        return config
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=__a , beta_end=__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for i, t in enumerate(scheduler.timesteps):
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''')
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for i, t in enumerate(scheduler.timesteps):
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps , device=__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(__a) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a , use_karras_sigmas=__a)
scheduler.set_timesteps(self.num_inference_steps , device=__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(__a) * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
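# A stand-alone sketch of the scheduler exercised above, using the same values as
# get_scheduler_config (no UNet involved; purely illustrative):
# scheduler = DPMSolverSDEScheduler(
#     num_train_timesteps=11_00, beta_start=0.0001, beta_end=0.02,
#     beta_schedule='''linear''', noise_sampler_seed=0)
# scheduler.set_timesteps(10)
# print(scheduler.timesteps)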
| 19 | 0 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path , tgt_path , save_path=None , **rouge_kwargs ):
    '''simple docstring'''
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **rouge_kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
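# Example CLI usage via fire (file names are hypothetical; extra flags are
# forwarded to calculate_rouge through **rouge_kwargs):
# python rouge_cli.py predictions.txt references.txt --save_path rouge.json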
| 446 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a = logging.get_logger(__name__)
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['pixel_values']
def __init__( self , __a = True , __a = None , __a = PILImageResampling.BICUBIC , __a = True , __a = True , __a = 1 / 2_55 , __a = None , __a = True , __a = None , __a = None , **__a , ) -> None:
'''simple docstring'''
super().__init__(**__a)
_UpperCamelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
_UpperCamelCase = get_size_dict(__a)
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_UpperCamelCase = get_size_dict(__a , default_to_square=__a , param_name='''crop_size''')
_UpperCamelCase = do_resize
_UpperCamelCase = do_rescale
_UpperCamelCase = do_normalize
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = rescale_factor
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase ( self , __a , __a , __a = PILImageResampling.BILINEAR , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(__a)
if "shortest_edge" in size:
_UpperCamelCase = get_resize_output_image_size(__a , size=size['''shortest_edge'''] , default_to_square=__a)
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_UpperCamelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''')
return resize(__a , size=__a , resample=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(__a)
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
return center_crop(__a , size=(size['''height'''], size['''width''']) , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a = None , **__a) -> np.ndarray:
'''simple docstring'''
return rescale(__a , scale=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> BatchFeature:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(__a , param_name='''crop_size''' , default_to_square=__a)
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(__a)
if not is_batched(__a):
_UpperCamelCase = [images]
if not valid_images(__a):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(__a) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=__a , size=__a , resample=__a) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=__a , size=__a) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=__a , scale=__a) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=__a , mean=__a , std=__a) for image in images]
_UpperCamelCase = [to_channel_dimension_format(__a , __a) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=__a , tensor_type=__a)
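# A short usage sketch for the image processor above (parameter names follow the
# usual HF image-processor API; in this dump the __init__/preprocess signatures
# are mangled, so this is illustrative only):
# image_processor = _UpperCAmelCase(size={'''height''': 2_24, '''width''': 2_24})
# batch = image_processor(images=pil_image, return_tensors='''pt''')
# batch['''pixel_values'''].shape  # (1, 3, 224, 224) after resize/crop/rescale/normalize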
| 19 | 0 |
def bubble_sort(list_data : list , length : int = 0 ) -> list:
    '''simple docstring'''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
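# Quick sanity checks for bubble_sort (illustrative; the recursion stops early
# once a full pass makes no swaps):
assert bubble_sort([5, 2, 9, 1]) == [1, 2, 5, 9]
assert bubble_sort([1]) == [1]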
| 663 |
"""simple docstring"""
# Imports
import numpy as np
class _UpperCAmelCase:
def __init__( self , __a=None , __a=None , __a=None , __a=None , __a=None) -> Dict:
'''simple docstring'''
self.set_matricies(red=__a , green=__a , blue=__a , red_edge=__a , nir=__a)
def UpperCAmelCase ( self , __a=None , __a=None , __a=None , __a=None , __a=None) -> Dict:
'''simple docstring'''
if red is not None:
_UpperCamelCase = red
if green is not None:
_UpperCamelCase = green
if blue is not None:
_UpperCamelCase = blue
if red_edge is not None:
_UpperCamelCase = red_edge
if nir is not None:
_UpperCamelCase = nir
return True
def UpperCAmelCase ( self , __a="" , __a=None , __a=None , __a=None , __a=None , __a=None) -> List[str]:
'''simple docstring'''
self.set_matricies(red=__a , green=__a , blue=__a , red_edge=__a , nir=__a)
        funcs = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''')
return False
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase ( self , __a=0.08 , __a=1.22 , __a=0.03) -> Optional[Any]:
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir / self.green) - 1
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return (self.red - self.blue) / self.red
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
        ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.nir - self.green
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def UpperCAmelCase ( self , __a=0.16) -> Optional[Any]:
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase ( self , __a=0.5) -> Dict:
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def UpperCAmelCase ( self , __a=None , __a=None) -> Any:
'''simple docstring'''
return (self.nir - b) / (a * self.red)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
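# Illustrative call pattern for the index calculator above. The dispatch method
# (second def, mangled to UpperCAmelCase in this dump) corresponds to
# `calculation` in the original source; the band arrays are synthetic:
# bands = np.ones((4, 4))
# cs = _UpperCAmelCase(red=bands, green=bands, blue=bands, red_edge=bands, nir=bands)
# cs.calculation('''NDVI''')  # looks up '''NDVI''' in the funcs table and evaluates it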
| 19 | 0 |
import string
from math import log10
def term_frequency(term , document ) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" )
    tokenize_document = document_without_punctuation.split(" " )  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency(term , corpus ) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("" , "" , string.punctuation ) )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency(df , n , smoothing=False ) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined." )
        return round(1 + log10(n / (1 + df) ) , 3 )
    if df == 0:
        raise ZeroDivisionError("df must be > 0" )
    elif n == 0:
        raise ValueError("log10(0) is undefined." )
    return round(log10(n / df ) , 3 )
def tf_idf(tf , idf ) -> float:
    return round(tf * idf , 3 )
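# Worked example for the TF-IDF helpers above (corpus is synthetic):
corpus = "the cat sat\nthe dog ran\nthe cat ran"
tf = term_frequency("cat" , "the cat sat" )   # 1 occurrence
df, n = document_frequency("cat" , corpus )   # (2 matching docs, 3 docs total)
idf = inverse_document_frequency(df , n )     # round(log10(3 / 2), 3) == 0.176
print(tf_idf(tf , idf ))                      # 0.176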
| 323 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=64 , __a=2 , __a=3 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a=[1, 16, 4, 4] , __a=None , ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self) -> int:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self) -> Optional[int]:
        '''simple docstring'''
        backbone_config = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=backbone_config , )
    def create_and_check_model( self , config , pixel_values , labels) -> Optional[Any]:
        '''simple docstring'''
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification( self , config , pixel_values , labels) -> Union[str, Any]:
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common( self) -> List[Any]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowercase__ = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ViTHybridModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''')
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear))
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = _config_zero_init(__a)
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(config=__a)
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                backbone_params = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = ViTHybridModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def prepare_img() -> Tuple:
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class _UpperCAmelCase( unittest.TestCase ):
@cached_property
    def default_image_processor( self) -> Tuple:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
@slow
@require_accelerate
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
        image_processor = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''')
        model = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''')
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''')
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertTrue(model.config.id2label[predicted_class_idx] , '''tabby, tabby cat''')
| 19 | 0 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
        begin_error_msg = ''' '''.join(str(e ).split(''' ''' )[:-1] )
        full_error_msg = ''
        depreciated_args = eval(str(e ).split(''' ''' )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
| 152 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['vqvae']
def __init__( self , __a , __a , __a , __a , ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__a , scheduler=__a , mel=__a , vqvae=__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return 50 if isinstance(self.scheduler , __a) else 10_00
@torch.no_grad()
def __call__( self , __a = 1 , __a = None , __a = None , __a = 0 , __a = 0 , __a = None , __a = None , __a = 0 , __a = 0 , __a = None , __a = 0 , __a = None , __a = None , __a=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
_UpperCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(__a)
_UpperCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
_UpperCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_UpperCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__a , device=self.device , )
_UpperCamelCase = noise
_UpperCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__a , __a)
_UpperCamelCase = self.mel.audio_slice_to_image(__a)
_UpperCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''').reshape(
(input_image.height, input_image.width))
_UpperCamelCase = (input_image / 2_55) * 2 - 1
_UpperCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
_UpperCamelCase = self.vqvae.encode(torch.unsqueeze(__a , 0)).latent_dist.sample(
generator=__a)[0]
_UpperCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_UpperCamelCase = self.scheduler.add_noise(__a , __a , self.scheduler.timesteps[start_step - 1])
_UpperCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_UpperCamelCase = int(mask_start_secs * pixels_per_second)
_UpperCamelCase = int(mask_end_secs * pixels_per_second)
_UpperCamelCase = self.scheduler.add_noise(__a , __a , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , __a):
_UpperCamelCase = self.unet(__a , __a , __a)['''sample''']
else:
_UpperCamelCase = self.unet(__a , __a)['''sample''']
if isinstance(self.scheduler , __a):
_UpperCamelCase = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , eta=__a , generator=__a , )['''prev_sample''']
else:
_UpperCamelCase = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , generator=__a , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_UpperCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_UpperCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_UpperCamelCase = 1 / self.vqvae.config.scaling_factor * images
_UpperCamelCase = self.vqvae.decode(__a)['''sample''']
_UpperCamelCase = (images / 2 + 0.5).clamp(0 , 1)
_UpperCamelCase = images.cpu().permute(0 , 2 , 3 , 1).numpy()
_UpperCamelCase = (images * 2_55).round().astype('''uint8''')
_UpperCamelCase = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__a , mode='''RGB''').convert('''L''') for _ in images))
_UpperCamelCase = [self.mel.image_to_audio(__a) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__a)[:, np.newaxis, :]) , **ImagePipelineOutput(__a))
@torch.no_grad()
    def encode( self , images , steps = 50) -> np.ndarray:
        '''simple docstring'''
        assert isinstance(self.scheduler , DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes() , dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 2_55) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample , t)['''sample''']
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
    @staticmethod
    def slerp( x0 , x1 , alpha) -> torch.Tensor:
        '''simple docstring'''
        theta = acos(torch.dot(torch.flatten(x0) , torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
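# Spherical-interpolation sketch using the static helper above (toy tensors;
# alpha=0 returns x0 and alpha=1 returns x1):
# x0, x1 = torch.randn(8), torch.randn(8)
# halfway = _UpperCAmelCase.slerp(x0, x1, 0.5)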
| 19 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.314_4598
def rms_speed_of_molecule( temperature , molar_mass ):
'''simple docstring'''
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 3_00
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
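# For reference, the quantity computed above is the kinetic-theory RMS speed
# v_rms = sqrt(3 * R * T / M). Note that M must be supplied in kg/mol for an
# answer in m/s; the molar_mass=28 demo keeps the original example's value as-is.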
| 340 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
_a = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'detr'
lowercase__ = ['past_key_values']
lowercase__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __a=True , __a=None , __a=3 , __a=1_00 , __a=6 , __a=20_48 , __a=8 , __a=6 , __a=20_48 , __a=8 , __a=0.0 , __a=0.0 , __a=True , __a="relu" , __a=2_56 , __a=0.1 , __a=0.0 , __a=0.0 , __a=0.02 , __a=1.0 , __a=False , __a="sine" , __a="resnet50" , __a=True , __a=False , __a=1 , __a=5 , __a=2 , __a=1 , __a=1 , __a=5 , __a=2 , __a=0.1 , **__a , ) -> int:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
            elif isinstance(backbone_config , dict):
                backbone_model_type = backbone_config.get('''model_type''')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
# set timm attributes to None
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None, None, None
_UpperCamelCase = use_timm_backbone
_UpperCamelCase = backbone_config
_UpperCamelCase = num_channels
_UpperCamelCase = num_queries
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = encoder_layers
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
_UpperCamelCase = backbone
_UpperCamelCase = use_pretrained_backbone
_UpperCamelCase = dilation
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=__a , **__a)
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.d_model
@classmethod
def UpperCAmelCase ( cls , __a , **__a) -> int:
'''simple docstring'''
return cls(backbone_config=__a , **__a)
def UpperCAmelCase ( self) -> Dict[str, any]:
'''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
return output
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = version.parse('1.11' )
@property
def UpperCAmelCase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
def UpperCAmelCase ( self) -> float:
'''simple docstring'''
return 1e-5
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return 12
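# Minimal usage sketch: the first class above corresponds to DetrConfig and the
# second to its ONNX config (checkpoint name from the config map; illustrative):
# config = _UpperCAmelCase(num_queries=1_00, use_timm_backbone=True, backbone='''resnet50''')
# config.to_dict()['''model_type''']  # '''detr'''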
| 19 | 0 |