import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
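# Example invocation, assuming hypothetical local paths (adjust to your own checkpoint layout):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert/pytorch_model.bin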
import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_flax_cross_test,
    require_flax,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available

from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester


if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image


def to_2tuple(x):
    # Return iterables unchanged; duplicate scalars into a pair.
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
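# For example, to_2tuple(224) -> (224, 224), while to_2tuple((224, 196)) is
# returned unchanged, so image_size/patch_size may be given as an int or a pair.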
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class VisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
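# To turn the paired logits into caption probabilities per image, a softmax over
# the text axis would do (a quick sketch, not part of the original test):
#   probs = np.exp(outputs.logits_per_image) / np.exp(outputs.logits_per_image).sum(-1, keepdims=True)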
import unittest

from parameterized import parameterized

from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel


class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
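# A minimal sketch of what "linear" RoPE scaling means (illustrative only; the
# actual implementation lives inside the model's rotary embedding module):
# position indices are divided by the scaling factor before the sin/cos tables
# are built, stretching the trained position range over longer sequences.
#
#   import torch
#
#   def linear_scaled_rope_angles(seq_len, dim, factor=10.0, base=10000.0):
#       inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
#       positions = torch.arange(seq_len).float() / factor  # the only change vs. vanilla RoPE
#       return torch.outer(positions, inv_freq)  # angles fed to sin/cos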
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
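# With this lazy structure, `from transformers import MLukeTokenizer` defers the
# sentencepiece-backed import until first use, e.g. (assuming sentencepiece is
# installed; the checkpoint name is illustrative):
#   from transformers import MLukeTokenizer
#   tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")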
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
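# For illustration, the checkpoint regex matches markdown links of the form
# `[name](https://huggingface.co/name)`:
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   # -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]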
from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    """Deadlock-avoidance check: simulates processes against available resources."""

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        # total units of each resource currently allocated across all processes
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> np.ndarray:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        # remaining claim of each process: maximum claim minus current allocation
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
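# Example run, using the module-level test data above (any truthy keyword
# argument makes main() print the resource tables first):
#   BankersAlgorithm(
#       test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#   ).main(describe=True)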
def reverse_words(input_str: str) -> str:
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
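# Example, in doctest form:
#   >>> reverse_words("I love Python")
#   'Python love I'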
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
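# The returned dict pairs each headline label with its counter, roughly of the
# shape below (values change constantly and the keys depend on the page markup,
# so this is only indicative):
#   {"Coronavirus Cases:": "...", "Deaths:": "...", "Recovered:": "..."}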
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            # resize slightly larger than the crop target, mimicking timm's crop_pct behaviour
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
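# Minimal usage sketch (assumes Pillow is installed and "img.jpg" exists; both
# are illustrative):
#   from PIL import Image
#   processor = PoolFormerImageProcessor()
#   batch = processor(images=Image.open("img.jpg"), return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the defaults above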
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
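# Rough usage sketch (checkpoint name and image variable are illustrative; the
# apply_ocr=True path additionally requires pytesseract):
#   from transformers import LayoutLMv2ImageProcessor, LayoutXLMTokenizer
#   processor = LayoutXLMProcessor(
#       LayoutLMv2ImageProcessor(apply_ocr=True),  # OCR extracts words + boxes
#       LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base"),
#   )
#   encoding = processor(images=image, return_tensors="pt")  # image: a PIL.Image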
import argparse

import fairseq
import torch
from torch import nn

from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : int, ) -> Any:
UpperCAmelCase_ : Optional[Any] = WavaVecaConfig.from_pretrained(
SCREAMING_SNAKE_CASE__, add_adapter=SCREAMING_SNAKE_CASE__, adapter_stride=SCREAMING_SNAKE_CASE__, adapter_kernel_size=SCREAMING_SNAKE_CASE__, use_auth_token=SCREAMING_SNAKE_CASE__, output_hidden_size=SCREAMING_SNAKE_CASE__, )
UpperCAmelCase_ : Optional[Any] = MBartConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
# load model
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
}, )
UpperCAmelCase_ : Any = model[0].eval()
# load feature extractor
UpperCAmelCase_ : List[str] = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE__, use_auth_token=SCREAMING_SNAKE_CASE__ )
# set weights for wav2vec2 encoder
UpperCAmelCase_ : int = WavaVecaModel(SCREAMING_SNAKE_CASE__ )
recursively_load_weights_wavaveca(model.encoder, SCREAMING_SNAKE_CASE__ )
# load decoder weights
UpperCAmelCase_ : Union[str, Any] = MBartForCausalLM(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=SCREAMING_SNAKE_CASE__ )
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
UpperCAmelCase_ : int = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE__, decoder=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : int = MBartaaTokenizer(SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[str] = hf_wavavec.config.to_dict()
UpperCAmelCase_ : Tuple = tokenizer.pad_token_id
UpperCAmelCase_ : Optional[int] = tokenizer.bos_token_id
UpperCAmelCase_ : Optional[int] = tokenizer.eos_token_id
UpperCAmelCase_ : int = '''mbart50'''
UpperCAmelCase_ : List[str] = '''wav2vec2'''
UpperCAmelCase_ : Dict = tokenizer.eos_token_id
UpperCAmelCase_ : List[str] = 250004
UpperCAmelCase_ : List[str] = tokenizer.eos_token_id
UpperCAmelCase_ : Dict = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE__ )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
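# Example invocation of this conversion script (the script name and all paths
# below are hypothetical placeholders):
#
#   python convert_wav2vec2_mbart_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path /path/to/output_dir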
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=10_24, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=25_00_04, type=int, help="`decoder_start_token_id` of model config")
snake_case_ : int = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 644 |
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int] ) -> list[list[int]]:
UpperCAmelCase_ : int = []
if len(SCREAMING_SNAKE_CASE__ ) == 1:
return [nums.copy()]
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ : List[Any] = nums.pop(0 )
UpperCAmelCase_ : Optional[Any] = permute(SCREAMING_SNAKE_CASE__ )
for perm in permutations:
perm.append(SCREAMING_SNAKE_CASE__ )
result.extend(SCREAMING_SNAKE_CASE__ )
nums.append(SCREAMING_SNAKE_CASE__ )
return result
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
def backtrack(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if start == len(SCREAMING_SNAKE_CASE__ ) - 1:
output.append(nums[:] )
else:
for i in range(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = nums[i], nums[start]
backtrack(start + 1 )
UpperCAmelCase_ , UpperCAmelCase_ : int = nums[i], nums[start] # backtrack
UpperCAmelCase_ : Optional[int] = []
backtrack(0 )
return output
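# Sketch of how the swap-based backtracking above enumerates permutations of
# [1, 2, 3]: at each level, element i is swapped into position `start`, the
# function recurses on start + 1, and the swap is undone so `nums` is intact
# for the next iteration. The emitted order is:
#
#   [1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]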
if __name__ == "__main__":
import doctest
    # use res to print the output of the second permutation function
snake_case_ : Tuple = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 644 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 525 |
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
lowerCAmelCase__ = input('Enter image url: ').strip()
print(F'Downloading image from {url} ...')
lowerCAmelCase__ = BeautifulSoup(requests.get(url).content, 'html.parser')
# The image URL is in the content field of the first meta tag with property og:image
lowerCAmelCase__ = soup.find('meta', {'property': 'og:image'})['content']
lowerCAmelCase__ = requests.get(image_url).content
lowerCAmelCase__ = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(F'Done. Image saved to disk as {file_name}.')
| 621 | 0 |
import unittest
import numpy as np
def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : np.ndarray | None = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = np.shape(__snake_case )
SCREAMING_SNAKE_CASE__ : Dict = np.shape(__snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = np.shape(__snake_case )
if shape_a[0] != shape_b[0]:
SCREAMING_SNAKE_CASE__ : int = (
"Expected the same number of rows for A and B. "
f'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(__snake_case )
if shape_b[1] != shape_c[1]:
SCREAMING_SNAKE_CASE__ : Dict = (
"Expected the same number of columns for B and C. "
f'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(__snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = pseudo_inv
if a_inv is None:
try:
SCREAMING_SNAKE_CASE__ : Dict = np.linalg.inv(__snake_case )
except np.linalg.LinAlgError:
raise ValueError(
'Input matrix A is not invertible. Cannot compute Schur complement.' )
return mat_c - mat_b.T @ a_inv @ mat_b
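# For the block matrix M = [[A, B], [B^T, C]], the function above returns the
# Schur complement S = C - B^T @ A^{-1} @ B. The identity exercised by the
# tests below, det(M) = det(A) * det(S), follows from the block LDU
# factorization of M.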
class snake_case ( unittest.TestCase ):
def __lowercase( self : Tuple )-> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
SCREAMING_SNAKE_CASE__ : List[str] = np.array([[2, 1], [6, 3]] )
SCREAMING_SNAKE_CASE__ : List[str] = schur_complement(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE__ : Any = np.block([[a, b], [b.T, c]] )
SCREAMING_SNAKE_CASE__ : List[str] = np.linalg.det(a_ )
SCREAMING_SNAKE_CASE__ : Dict = np.linalg.det(a_ )
SCREAMING_SNAKE_CASE__ : Any = np.linalg.det(a_ )
self.assertAlmostEqual(a_ , det_a * det_s )
def __lowercase( self : Any )-> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
SCREAMING_SNAKE_CASE__ : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(a_ ):
schur_complement(a_ , a_ , a_ )
def __lowercase( self : Dict )-> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
SCREAMING_SNAKE_CASE__ : str = np.array([[0, 3], [3, 0], [2, 3]] )
SCREAMING_SNAKE_CASE__ : Dict = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(a_ ):
schur_complement(a_ , a_ , a_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 713 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any]=False , lowercase__ : str=False , lowercase__ : Dict=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def _a ( lowercase__ : List[str] , lowercase__ : Dict ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
SCREAMING_SNAKE_CASE__ : Dict = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : Any = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE__ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_bias[-config.hidden_size :]
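# The loop above unpacks timm's fused attention projection: each layer stores
# a single (3 * hidden_size, hidden_size) qkv matrix that is sliced row-wise
# into query / key / value. Equivalent slicing in plain numpy (illustrative
# sketch only):
#
#   hidden = 4
#   qkv = np.arange(3 * hidden * hidden).reshape(3 * hidden, hidden)
#   q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]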
def _a ( lowercase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def _a ( lowercase__ : int , lowercase__ : int , lowercase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = dct.pop(lowercase__ )
SCREAMING_SNAKE_CASE__ : Any = val
@torch.no_grad()
def _a ( lowercase__ : Dict , lowercase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=lowercase__ )
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : str = False
if "vqa" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : str = 31_29
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ : int = 'vqa2-id2label.json'
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {int(lowercase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Dict = idalabel
SCREAMING_SNAKE_CASE__ : str = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[str] = ViltForQuestionAnswering(lowercase__ )
elif "nlvr" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Dict = {0: 'False', 1: 'True'}
SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in config.idalabel.items()}
SCREAMING_SNAKE_CASE__ : Tuple = 3
SCREAMING_SNAKE_CASE__ : int = ViltForImagesAndTextClassification(lowercase__ )
elif "irtr" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : str = ViltForImageAndTextRetrieval(lowercase__ )
elif "mlm_itm" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : Optional[int] = ViltForMaskedLM(lowercase__ )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE__ : Any = torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' )['state_dict']
SCREAMING_SNAKE_CASE__ : Any = create_rename_keys(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ )
if mlm_model or irtr_model:
SCREAMING_SNAKE_CASE__ : Any = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = model.load_state_dict(lowercase__ , strict=lowercase__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(lowercase__ )
# Define processor
SCREAMING_SNAKE_CASE__ : str = ViltImageProcessor(size=3_84 )
SCREAMING_SNAKE_CASE__ : List[Any] = BertTokenizer.from_pretrained('bert-base-uncased' )
SCREAMING_SNAKE_CASE__ : List[Any] = ViltProcessor(lowercase__ , lowercase__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
SCREAMING_SNAKE_CASE__ : List[str] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw )
SCREAMING_SNAKE_CASE__ : Any = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw )
SCREAMING_SNAKE_CASE__ : Tuple = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
SCREAMING_SNAKE_CASE__ : List[Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : List[str] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : List[Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
SCREAMING_SNAKE_CASE__ : Tuple = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=lowercase__ ).raw )
if mlm_model:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'a bunch of [MASK] laying on a [MASK].'
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'How many cats are there?'
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : str = model(**lowercase__ )
# Verify outputs
if mlm_model:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Size([1, 11, 3_05_22] )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , lowercase__ , atol=1E-4 )
# verify masked token prediction equals "cats"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
        SCREAMING_SNAKE_CASE__ : str = torch.Size([1, 31_29] )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 )
# verify vqa prediction equals "2"
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
        SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Size([1, 2] )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([-2.8721, 2.1291] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 636 | 0 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _UpperCamelCase :
'''simple docstring'''
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[Any] = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.00_01 , beta_end=0.02 , thresholding=lowerCAmelCase__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_14 , time_embedding_act_fn="""gelu""" , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.00_01 , beta_end=0.02 , thresholding=lowerCAmelCase__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : int = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = inputs["""prompt"""]
__SCREAMING_SNAKE_CASE : Tuple = inputs["""generator"""]
__SCREAMING_SNAKE_CASE : int = inputs["""num_inference_steps"""]
__SCREAMING_SNAKE_CASE : Any = inputs["""output_type"""]
if "image" in inputs:
__SCREAMING_SNAKE_CASE : Optional[Any] = inputs["""image"""]
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if "mask_image" in inputs:
__SCREAMING_SNAKE_CASE : Optional[int] = inputs["""mask_image"""]
else:
__SCREAMING_SNAKE_CASE : List[Any] = None
if "original_image" in inputs:
__SCREAMING_SNAKE_CASE : str = inputs["""original_image"""]
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.encode_prompt(lowerCAmelCase__ )
# inputs with prompt converted to embeddings
__SCREAMING_SNAKE_CASE : Tuple = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
__SCREAMING_SNAKE_CASE : Optional[Any] = image
if mask_image is not None:
__SCREAMING_SNAKE_CASE : int = mask_image
if original_image is not None:
__SCREAMING_SNAKE_CASE : int = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = pipe(**lowerCAmelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class.from_pretrained(lowerCAmelCase__ )
pipe_loaded.to(lowerCAmelCase__ )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCAmelCase__ , lowerCAmelCase__ ) is None , F"`{optional_component}` did not stay set to None after loading." , )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inputs["""generator"""]
__SCREAMING_SNAKE_CASE : Union[str, Any] = inputs["""num_inference_steps"""]
__SCREAMING_SNAKE_CASE : Dict = inputs["""output_type"""]
# inputs with prompt converted to embeddings
__SCREAMING_SNAKE_CASE : Optional[int] = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
__SCREAMING_SNAKE_CASE : List[Any] = image
if mask_image is not None:
__SCREAMING_SNAKE_CASE : Optional[Any] = mask_image
if original_image is not None:
__SCREAMING_SNAKE_CASE : int = original_image
__SCREAMING_SNAKE_CASE : Optional[Any] = pipe_loaded(**lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : str = np.abs(to_np(lowerCAmelCase__ ) - to_np(lowerCAmelCase__ ) ).max()
self.assertLess(lowerCAmelCase__ , 1E-4 )
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = pipe(**lowerCAmelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class.from_pretrained(lowerCAmelCase__ )
pipe_loaded.to(lowerCAmelCase__ )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
__SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = pipe_loaded(**lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : Tuple = np.abs(to_np(lowerCAmelCase__ ) - to_np(lowerCAmelCase__ ) ).max()
        self.assertLess(lowerCAmelCase__ , 1E-4 )
| 578 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def lowerCAmelCase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: str , _lowerCamelCase: str , _lowerCamelCase: Path , _lowerCamelCase: str = None , _lowerCamelCase: str = None , _lowerCamelCase: str = None , ):
if config_name_or_path is None:
__SCREAMING_SNAKE_CASE : List[str] = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base"""
if generator_tokenizer_name_or_path is None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
__SCREAMING_SNAKE_CASE : Tuple = question_encoder_name_or_path
__SCREAMING_SNAKE_CASE : int = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration
# Save model.
__SCREAMING_SNAKE_CASE : List[Any] = RagConfig.from_pretrained(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = gen_config
__SCREAMING_SNAKE_CASE : Union[str, Any] = question_encoder_config
__SCREAMING_SNAKE_CASE : Dict = model_class.from_pretrained_question_encoder_generator(
_lowerCamelCase , _lowerCamelCase , config=_lowerCamelCase )
rag_model.save_pretrained(_lowerCamelCase )
# Sanity check.
model_class.from_pretrained(_lowerCamelCase )
# Save tokenizers.
__SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(_lowerCamelCase )
gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
UpperCamelCase__ : Dict = parser.parse_args()
UpperCamelCase__ : Any = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
    )
| 578 | 1 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( __A , __A , __A ) -> Optional[Any]:
'''simple docstring'''
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def __UpperCAmelCase ( __A , __A , __A , __A="attention" ) -> List[Any]:
'''simple docstring'''
    UpperCAmelCase__ = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
UpperCAmelCase__ = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
UpperCAmelCase__ = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
UpperCAmelCase__ = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
UpperCAmelCase__ = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
UpperCAmelCase__ = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
UpperCAmelCase__ = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
UpperCAmelCase__ = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
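# t5x stacks every layer's attention kernels along axis 1, so the lookup
# above first selects layer i and then flattens the head axes: k/q/v come
# out as (hidden, num_heads * head_dim) and the output projection as
# (num_heads * head_dim, hidden). The `.T` transposes later in the script
# adapt them to PyTorch's (out_features, in_features) layout.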
def __UpperCAmelCase ( __A , __A , __A , __A=False ) -> Tuple:
'''simple docstring'''
if split_mlp_wi:
UpperCAmelCase__ = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
UpperCAmelCase__ = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
UpperCAmelCase__ = (wi_a, wi_a)
else:
UpperCAmelCase__ = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
UpperCAmelCase__ = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
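# `split_mlp_wi` distinguishes the v1.1-style gated-GELU feed-forward, which
# has two input projections, from the original single-projection MLP.
# Schematically (a sketch, not code from this converter):
#
#   v1.0: h = wo @ gelu(wi @ x)
#   v1.1: h = wo @ (gelu(wi_0 @ x) * (wi_1 @ x))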
def __UpperCAmelCase ( __A , __A , __A , __A ) -> Any:
'''simple docstring'''
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def __UpperCAmelCase ( __A , *, __A , __A , __A = False ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ = traverse_util.flatten_dict(variables["target"] )
UpperCAmelCase__ = {"/".join(__A ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCAmelCase__ = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" , __A )
UpperCAmelCase__ = collections.OrderedDict()
# Shared embeddings.
UpperCAmelCase__ = old["token_embedder/embedding"]
# Encoder.
for i in range(__A ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase__ = tax_layer_norm_lookup(__A , __A , "encoder" , "pre_attention_layer_norm" )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = tax_attention_lookup(__A , __A , "encoder" , "attention" )
UpperCAmelCase__ = layer_norm
UpperCAmelCase__ = k.T
UpperCAmelCase__ = o.T
UpperCAmelCase__ = q.T
UpperCAmelCase__ = v.T
# Block i, layer 1 (MLP).
UpperCAmelCase__ = tax_layer_norm_lookup(__A , __A , "encoder" , "pre_mlp_layer_norm" )
UpperCAmelCase__ , UpperCAmelCase__ = tax_mlp_lookup(__A , __A , "encoder" , __A )
UpperCAmelCase__ = layer_norm
if split_mlp_wi:
UpperCAmelCase__ = wi[0].T
UpperCAmelCase__ = wi[1].T
else:
UpperCAmelCase__ = wi.T
UpperCAmelCase__ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
UpperCAmelCase__ = tax_relpos_bias_lookup(
__A , __A , "encoder" ).T
UpperCAmelCase__ = old["encoder/encoder_norm/scale"]
if not scalable_attention:
UpperCAmelCase__ = tax_relpos_bias_lookup(
__A , 0 , "encoder" ).T
UpperCAmelCase__ = tax_relpos_bias_lookup(
__A , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(__A ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase__ = tax_layer_norm_lookup(__A , __A , "decoder" , "pre_self_attention_layer_norm" )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = tax_attention_lookup(__A , __A , "decoder" , "self_attention" )
UpperCAmelCase__ = layer_norm
UpperCAmelCase__ = k.T
UpperCAmelCase__ = o.T
UpperCAmelCase__ = q.T
UpperCAmelCase__ = v.T
# Block i, layer 1 (Cross Attention).
UpperCAmelCase__ = tax_layer_norm_lookup(__A , __A , "decoder" , "pre_cross_attention_layer_norm" )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = tax_attention_lookup(__A , __A , "decoder" , "encoder_decoder_attention" )
UpperCAmelCase__ = layer_norm
UpperCAmelCase__ = k.T
UpperCAmelCase__ = o.T
UpperCAmelCase__ = q.T
UpperCAmelCase__ = v.T
# Block i, layer 2 (MLP).
UpperCAmelCase__ = tax_layer_norm_lookup(__A , __A , "decoder" , "pre_mlp_layer_norm" )
UpperCAmelCase__ , UpperCAmelCase__ = tax_mlp_lookup(__A , __A , "decoder" , __A )
UpperCAmelCase__ = layer_norm
if split_mlp_wi:
UpperCAmelCase__ = wi[0].T
UpperCAmelCase__ = wi[1].T
else:
UpperCAmelCase__ = wi.T
UpperCAmelCase__ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
UpperCAmelCase__ = tax_relpos_bias_lookup(__A , __A , "decoder" ).T
UpperCAmelCase__ = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCAmelCase__ = old["decoder/logits_dense/kernel"].T
return new
def __UpperCAmelCase ( __A , __A ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase__ = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase__ = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
UpperCAmelCase__ = state_dict["shared.weight"]
return state_dict
def __UpperCAmelCase ( __A , __A , __A , __A , __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = checkpoints.load_tax_checkpoint(__A )
UpperCAmelCase__ = convert_tax_to_pytorch(
__A , num_layers=config.num_layers , is_encoder_only=__A , scalable_attention=__A )
UpperCAmelCase__ = make_state_dict(__A , __A )
model.load_state_dict(__A , strict=__A )
def __UpperCAmelCase ( __A , __A , __A , __A = False , __A = False , ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ = MTaConfig.from_json_file(__A )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
UpperCAmelCase__ = UMTaEncoderModel(__A )
else:
UpperCAmelCase__ = UMTaForConditionalGeneration(__A )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__A , __A , __A , __A , __A )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__A )
# Verify that we can load the checkpoint.
model.from_pretrained(__A )
print("Done" )
if __name__ == "__main__":
A = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
A = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 703 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Dict , _lowercase : int , _lowercase : List[Any]=None , _lowercase : str=True , _lowercase : str=None , **_lowercase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = config_class
UpperCAmelCase__ = has_text_modality
UpperCAmelCase__ = kwargs
UpperCAmelCase__ = common_properties
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.config_class(**self.inputs_dict )
UpperCAmelCase__ = (
["hidden_size", "num_attention_heads", "num_hidden_layers"]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["vocab_size"] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(_lowercase , _lowercase ) , msg=F"""`{prop}` does not exist""" )
# Test that config has the common properties as setter
for idx, name in enumerate(_lowercase ):
try:
setattr(_lowercase , _lowercase , _lowercase )
self.parent.assertEqual(
getattr(_lowercase , _lowercase ) , _lowercase , msg=F"""`{name} value {idx} expected, but was {getattr(_lowercase , _lowercase )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(_lowercase ):
try:
UpperCAmelCase__ = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(_lowercase , _lowercase ) , _lowercase , msg=F"""`{name} value {idx} expected, but was {getattr(_lowercase , _lowercase )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def _UpperCAmelCase ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = self.config_class(**self.inputs_dict )
UpperCAmelCase__ = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , _lowercase )
def _UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ = os.path.join(_lowercase , "config.json" )
config_first.to_json_file(_lowercase )
UpperCAmelCase__ = self.config_class.from_json_file(_lowercase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(_lowercase )
UpperCAmelCase__ = self.config_class.from_pretrained(_lowercase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = self.config_class(**self.inputs_dict )
UpperCAmelCase__ = "test"
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ = os.path.join(_lowercase , _lowercase )
config_first.save_pretrained(_lowercase )
UpperCAmelCase__ = self.config_class.from_pretrained(_lowercase , subfolder=_lowercase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
UpperCAmelCase__ = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def _UpperCAmelCase ( self : Any ):
"""simple docstring"""
if self.config_class.is_composition:
return
UpperCAmelCase__ = self.config_class()
self.parent.assertIsNotNone(_lowercase )
def _UpperCAmelCase ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = copy.deepcopy(_lowercase )
UpperCAmelCase__ = self.config_class(**_lowercase )
UpperCAmelCase__ = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
elif getattr(_lowercase , _lowercase ) != value:
wrong_values.append((key, getattr(_lowercase , _lowercase ), value) )
if len(_lowercase ) > 0:
UpperCAmelCase__ = "\n".join([F"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(F"""The following keys were not properly set in the config:\n{errors}""" )
def _UpperCAmelCase ( self : Any ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
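# Typical usage from a model's configuration test suite (the config class and
# keyword arguments below are illustrative):
#
#   class BertConfigTest(unittest.TestCase):
#       def setUp(self):
#           self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#       def test_config(self):
#           self.config_tester.run_common_tests()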
| 277 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
SCREAMING_SNAKE_CASE__ = '''<<<<<<< This should probably be modified because it mentions: '''
SCREAMING_SNAKE_CASE__ = '''=======
>>>>>>>
'''
SCREAMING_SNAKE_CASE__ = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
SCREAMING_SNAKE_CASE__ = [
# (pattern, replacement)
# Order is important here for some replacements
(r'''tfds\.core''', r'''datasets'''),
(r'''tf\.io\.gfile\.GFile''', r'''open'''),
(r'''tf\.([\w\d]+)''', r'''datasets.Value(\'\1\')'''),
(r'''tfds\.features\.Text\(\)''', r'''datasets.Value(\'string\')'''),
(r'''tfds\.features\.Text\(''', r'''datasets.Value(\'string\'),'''),
(r'''features\s*=\s*tfds.features.FeaturesDict\(''', r'''features=datasets.Features('''),
(r'''tfds\.features\.FeaturesDict\(''', r'''dict('''),
(r'''The TensorFlow Datasets Authors''', r'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(r'''tfds\.''', r'''datasets.'''),
(r'''dl_manager\.manual_dir''', r'''self.config.data_dir'''),
(r'''self\.builder_config''', r'''self.config'''),
]
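# Example of what the (pattern, replacement) pairs above do to a single tfds
# line (illustrative input):
#
#   before: features=tfds.features.FeaturesDict({"text": tfds.features.Text()})
#   after:  features=datasets.Features({"text": datasets.Value('string')})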
def A ( __UpperCamelCase ) -> Optional[Any]:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class __lowerCAmelCase ( _lowercase ):
"""simple docstring"""
@staticmethod
def _a ( _snake_case : ArgumentParser ):
"""simple docstring"""
A__ = parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=__lowerCAmelCase )
def __init__( self : str , _snake_case : str , _snake_case : str , *_snake_case : int ):
"""simple docstring"""
A__ = get_logger('datasets-cli/converting' )
A__ = tfds_path
A__ = datasets_directory
def _a ( self : Dict ):
"""simple docstring"""
if os.path.isdir(self._tfds_path ):
A__ = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
A__ = os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
A__ = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
A__ = []
A__ = []
A__ = {}
if os.path.isdir(self._tfds_path ):
A__ = os.listdir(__lowerCAmelCase )
else:
A__ = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
A__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
A__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if not os.path.isfile(__lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(__lowerCAmelCase , encoding='utf-8' ) as f:
A__ = f.readlines()
A__ = []
A__ = False
A__ = False
A__ = []
for line in lines:
A__ = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
A__ = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
A__ = ''''''
continue
elif "from absl import logging" in out_line:
A__ = '''from datasets import logging\n'''
elif "getLogger" in out_line:
A__ = out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
A__ = True
A__ = list(filter(lambda _snake_case : e in out_line , __lowerCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowerCAmelCase ) + '\n' )
out_lines.append(__lowerCAmelCase )
out_lines.append(__lowerCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
A__ = re.sub(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
A__ = re.match(R'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , __lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
A__ = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
A__ = True
out_lines.append(__lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
A__ = f_name.replace('.py' , '' )
A__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
A__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(__lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(__lowerCAmelCase )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.writelines(__lowerCAmelCase )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
A__ = os.path.basename(__lowerCAmelCase )
A__ = imports_to_builder_map[f_name.replace('.py' , '' )]
                self._logger.info(F'''Moving {utils_file} to {dest_folder}''' )
shutil.copy(__lowerCAmelCase , __lowerCAmelCase )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 9 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowerCAmelCase__ = '''hf-internal-testing/tiny-random-bert'''
lowerCAmelCase__ = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowerCAmelCase__ = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = cached_file(__lowerCAmelCase , __lowerCAmelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__lowerCAmelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) )
with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
_lowerCamelCase : Optional[int] = f.read()
self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
self.assertTrue(os.path.isfile(__lowerCAmelCase ) )
# File is cached at the same place the second time.
_lowerCamelCase : Tuple = cached_file(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
# Using a specific revision to test the full commit hash.
_lowerCamelCase : Dict = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''9b8c223''' )
self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
_lowerCamelCase : Optional[int] = cached_file('''tiny-random-bert''' , __lowerCAmelCase )
with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
_lowerCamelCase : str = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''aaaa''' )
with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
_lowerCamelCase : int = cached_file(__lowerCAmelCase , '''conf''' )
    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
        # This check we did call the fake head request
        mock_head.assert_called()
    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist in the repo.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 83 | 0 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return
    # First explore the branch that skips sequence[index] ...
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # ... then the branch that includes it, undoing the choice on backtrack.
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
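# Each element is either skipped or included, so the recursion tree has
# 2 ** len(sequence) leaves and prints one subsequence per leaf. For example,
# [1, 2] prints, in this order: [], [2], [1], [1, 2].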
if __name__ == "__main__":
    seq = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
| 714 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
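# Minimal usage sketch (not part of the original file); the defaults above mirror the
# base checkpoint, so constructing the config without arguments is a quick sanity check:
#   config = MarkupLMConfig()
#   config.max_depth               # 50, maximum xpath depth that gets embedded
#   config.xpath_unit_hidden_size  # 32, hidden size per xpath unit embedding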
| 219 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE__: Union[str, Any]= [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
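        expected_words = SCREAMING_SNAKE_CASE__  # keep the word list: the next statement reuses the same obfuscated name for the boxes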
SCREAMING_SNAKE_CASE__: Dict= [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
        expected_boxes = SCREAMING_SNAKE_CASE__  # the boxes list assigned in the fmt: off block above
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 64 |
'''simple docstring'''
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
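# Invariant: when the loop ends, a[left_index + 1 : i] holds every scanned element
# smaller than the pivot, so swapping the pivot into slot i - 1 places it at its
# final sorted position, and i - 1 is returned as the partition point.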
def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point
def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
| 384 | 0 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 712 |
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 32 | 0 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()

results = {}
# fmt: off
# The pairing of keys to expected-logits tensors below is a best-effort restoration;
# each key is built from a checkpoint id as "_".join("_".join(model_id.split("/")).split("-")),
# matching the lookup in the assert at the bottom of this script.
results["google_ddpm_cifar10_32"] = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
results["google_ddpm_ema_bedroom_256"] = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
results["CompVis_ldm_celebahq_256"] = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
results["google_ncsnpp_ffhq_1024"] = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
results["google_ncsnpp_bedroom_256"] = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
results["google_ncsnpp_celebahq_256"] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
results["google_ncsnpp_church_256"] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
results["google_ncsnpp_ffhq_256"] = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
results["google_ddpm_cat_256"] = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
results["google_ddpm_celebahq_256"] = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
results["google_ddpm_ema_celebahq_256"] = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
results["google_ddpm_church_256"] = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
results["google_ddpm_bedroom_256"] = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
results["google_ddpm_ema_church_256"] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
results["google_ddpm_ema_cat_256"] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 534 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
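# Worked example: for number = 26 the inner loop finds answers[26 - 25] = 1 at j = 5,
# so answers[26] = 2, i.e. 26 = 25 + 1 is representable with a minimum of two squares.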
if __name__ == "__main__":
import doctest
doctest.testmod()
| 559 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
# NOTE: these two helpers intentionally shadow the builtins `map` and `filter`;
# the benchmark below calls them by these names.
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 721 |
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
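# Example walk: [3, 1, 2] -> swap to [1, 3, 2], step back, advance, swap to [1, 2, 3].
# The index moves back one slot after every swap, which makes gnome sort a
# single-index variant of insertion sort.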
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 639 | 0 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            # left padding: right-align the (truncated) sequence
            if isinstance(padding_value, tuple):
                out_tensor[i, -len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]
    return out_tensor.tolist()
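# Quick shape check (a sketch, not part of the original file), consistent with the
# implementation above:
#   padding_tensor([[1, 2]], -1, "right", 4) -> [[1, 2, -1, -1]]
#   padding_tensor([[1, 2]], -1, "left", 4)  -> [[-1, -1, 1, 2]]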
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors would fail here: the labels are not all the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
| 537 |
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)
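# Worked example: with prices = [1, 5, 8, 9] and n = 4, cutting into two pieces of
# length 2 earns 5 + 5 = 10, which beats selling the rod whole (9); all three
# implementations agree on 10.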
def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 537 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
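# A hypothetical usage sketch (not part of the original file); the checkpoint id is
# an assumption:
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   class_ids = pipe.get_label_ids(["white shark"])
#   image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]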
| 710 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def a__ ( _SCREAMING_SNAKE_CASE : list ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
UpperCAmelCase_ : Tuple = {"+", "-", "*", "/"}
UpperCAmelCase_ : list[Any] = []
for token in postfix_notation:
if token in operations:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(_SCREAMING_SNAKE_CASE ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 217 |
def topological_sort(graph):
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
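# With the graph above, vertices leave the queue in the order 0, 1, 2, 3, 4, 5,
# so the script prints [0, 1, 2, 3, 4, 5].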
| 551 | 0 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 431 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class BulletMenu:
    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"  {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
| 431 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mluke'''] = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 351 |
"""simple docstring"""
from typing import Any
def __lowerCamelCase ( input_list ) -> list[Any]:
    """simple docstring"""
    if not input_list:
        return []
    counts = [input_list.count(value ) for value in input_list]
    max_count = max(counts )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(counts ) if value == max_count} )
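# Example calls (using the module-level name above as-is):
# __lowerCamelCase([2, 2, 3]) -> [2]
# __lowerCamelCase([2, 2, 3, 3]) -> [2, 3]  (ties return every mode, sorted)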
if __name__ == "__main__":
import doctest
doctest.testmod()
| 610 | 0 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label( fname ):
    stem = fname.split(os.path.sep )[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem ).groups()[0]
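# e.g. (hypothetical file name) extract_label("images/great_pyrenees_107.jpg") -> "great_pyrenees"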
class PetsDataset( Dataset ):
    def __init__( self , file_names , image_transform=None , label_to_id=None ):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
    def __len__( self ):
        return len(self.file_names )
    def __getitem__( self , idx ):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname )
        image = raw_image.convert("RGB" )
        if self.image_transform is not None:
            image = self.image_transform(image )
        label = extract_label(fname )
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function( config, args ):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple) ):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit" ):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps )
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__ )[-1].split("." )[0]
        accelerator.init_trackers(run, config )
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
    # Build the label correspondences
    all_labels = [extract_label(fname ) for fname in file_names]
    id_to_label = list(set(all_labels ) )
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label )}
    # Set the seed before splitting the data.
    np.random.seed(seed )
    torch.manual_seed(seed )
    torch.cuda.manual_seed_all(seed )
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names ) )
    cut = int(0.8 * len(file_names ) )
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0) ), ToTensor()] )
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size ), ToTensor()] )
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4 )
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4 )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id ) )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
    std = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader ) )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}" )
            accelerator.load_state(args.resume_from_checkpoint )
            path = os.path.basename(args.resume_from_checkpoint )
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
            dirs.sort(key=os.path.getctime )
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path )[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "" ) ) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", "" ) )
            starting_epoch = resume_step // len(train_dataloader )
            resume_step -= starting_epoch * len(train_dataloader )
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs ):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step )
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device ) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs )
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"] )
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss )
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int ):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir )
                    accelerator.save_state(output_dir )
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device ) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs )
            predictions = outputs.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]) )
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}" )
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader ),
                    "epoch": epoch,
                }, step=overall_step, )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir )
            accelerator.save_state(output_dir )
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument("--data_dir", required=True, help="The data folder on disk." )
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
    parser.add_argument(
        "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
    parser.add_argument(
        "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs and relevant project information", )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args )
if __name__ == "__main__":
main()
| 710 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list( shape, scale=1.0, rng=None, name=None ):
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
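# e.g. floats_list((2, 3)) returns a 2x3 nested list of uniform floats in [0, scale).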
class TvltFeatureExtractionTester( unittest.TestCase):
    def __init__( self , parent , batch_size=7 , min_seq_length=4_00 , max_seq_length=20_00 , spectrogram_length=20_48 , feature_size=1_28 , num_audio_channels=1 , hop_length=5_12 , chunk_length=30 , sampling_rate=4_41_00 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor
    def setUp( self ):
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
    def test_feat_extract_properties( self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor , "spectrogram_length" ) )
        self.assertTrue(hasattr(feature_extractor , "feature_size" ) )
        self.assertTrue(hasattr(feature_extractor , "num_audio_channels" ) )
        self.assertTrue(hasattr(feature_extractor , "hop_length" ) )
        self.assertTrue(hasattr(feature_extractor , "chunk_length" ) )
        self.assertTrue(hasattr(feature_extractor , "sampling_rate" ) )
    def test_feat_extract_from_and_save_pretrained( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters" )
        mel_second = dict_second.pop("mel_filters" )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters" )
        mel_second = dict_second.pop("mel_filters" )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=4_41_00 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="np" , sampling_rate=4_41_00 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs , return_tensors="np" , sampling_rate=4_41_00 , mask_audio=True ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="np" , sampling_rate=4_41_00 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples( self , num_samples ):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors="pt" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 1_92, 1_28) )
        expected_slice = torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1e-4 ) )
| 453 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ) -> Dict:
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ) -> dict:
        '''simple docstring'''
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''.''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 1.0,
            '''sag_scale''': 1.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_inference_batch_single_identical( self ) -> Any:
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests( unittest.TestCase):
    def tearDown( self ) -> None:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1( self ) -> Optional[Any]:
        '''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '''.'''
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
    def test_stable_diffusion_2( self ) -> int:
        '''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '''.'''
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
    def test_stable_diffusion_2_non_square( self ) -> str:
        '''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '''.'''
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , width=768 , height=512 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
| 24 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( n ):
    """simple docstring"""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
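# Example (using the function name above): SCREAMING_SNAKE_CASE(100) -> [2, 2, 5, 5]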
if __name__ == "__main__":
import doctest
doctest.testmod()
| 605 | 0 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple( max_perimeter ):
    """simple docstring"""
    triplets: typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def solution( max_perimeter = 1000 ):
    """simple docstring"""
    triplets = pythagorean_triple(max_perimeter )
return triplets.most_common(1 )[0][0]
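# With the default limit of 1000 this is Project Euler problem 39; solution()
# returns 840, the perimeter with the most integer right-triangle solutions.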
if __name__ == "__main__":
print(F"Perimeter {solution()} has maximum solutions")
| 704 |
"""simple docstring"""
class Graph :
'''simple docstring'''
    def __init__( self) -> None:
        '''simple docstring'''
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex) -> None:
        '''simple docstring'''
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight) -> None:
        '''simple docstring'''
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self) -> None:
        '''simple docstring'''
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self) -> str:
        '''simple docstring'''
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F"""{head} -> {tail} == {weight}\n"""
return string.rstrip("\n")
    def get_edges( self) -> list:
        '''simple docstring'''
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output
    def get_vertices( self) -> list:
        '''simple docstring'''
        return self.adjacency.keys()
    @staticmethod
    def build( vertices=None , edges=None) -> "Graph":
        '''simple docstring'''
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class UnionFind :
    '''simple docstring'''
    def __init__( self) -> None:
        '''simple docstring'''
        self.parent = {}
        self.rank = {}
    def __len__( self) -> int:
        '''simple docstring'''
        return len(self.parent)
    def make_set( self , item) -> Any:
        '''simple docstring'''
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item
    def find( self , item) -> Any:
        '''simple docstring'''
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]
    def union( self , itema , itemb) -> Any:
        '''simple docstring'''
        roota = self.find(itema)
        rootb = self.find(itemb)
        if roota == rootb:
            return roota
        if self.rank[roota] > self.rank[rootb]:
            self.parent[rootb] = roota
            return roota
        if self.rank[roota] < self.rank[rootb]:
            self.parent[roota] = rootb
            return rootb
        if self.rank[roota] == self.rank[rootb]:
            self.rank[roota] += 1
            self.parent[rootb] = roota
            return roota
        return None
def boruvka_mst( graph) -> "Graph":
    '''simple docstring'''
    num_components = graph.num_vertices
    union_find = UnionFind()
    mst_edges = []
    while num_components > 1:
        cheap_edge = {}
        for vertex in graph.get_vertices():
            cheap_edge[vertex] = -1
        edges = graph.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for edge in edges:
            head, tail, weight = edge
            seta = union_find.find(head)
            setb = union_find.find(tail)
            if seta != setb:
                if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                    cheap_edge[seta] = [head, tail, weight]
                if cheap_edge[setb] == -1 or cheap_edge[setb][2] > weight:
                    cheap_edge[setb] = [head, tail, weight]
        for vertex in cheap_edge:
            if cheap_edge[vertex] != -1:
                head, tail, weight = cheap_edge[vertex]
                if union_find.find(head) != union_find.find(tail):
                    union_find.union(head , tail)
                    mst_edges.append(cheap_edge[vertex])
                    num_components = num_components - 1
    mst = Graph.build(edges=mst_edges)
    return mst
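# A quick end-to-end sketch of the classes above (names as fixed here; the
# original snippet left them obfuscated, so treat this usage as illustrative):
# g = Graph.build(edges=[("a", "b", 1), ("b", "c", 2), ("a", "c", 3)])
# g.distinct_weight()          # ensure all edge weights are distinct
# mst = boruvka_mst(g)         # minimum spanning tree keeps 2 of the 3 edges
# print(mst)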
| 120 | 0 |
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests (ModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    model_class = PriorTransformer
    main_input_name = """hidden_states"""
    @property
    def dummy_input (self : List[Any] ):
        '''simple docstring'''
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def get_dummy_seed_input (self : Optional[int] , seed : Optional[int]=0 ):
        '''simple docstring'''
        torch.manual_seed(seed )
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    @property
    def input_shape (self : Union[str, Any] ):
        '''simple docstring'''
        return (4, 8)
    @property
    def output_shape (self : Any ):
        '''simple docstring'''
        return (4, 8)
    def prepare_init_args_and_inputs_for_common (self : Dict ):
        '''simple docstring'''
        init_dict = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 4,
            '''num_layers''': 2,
            '''embedding_dim''': 8,
            '''num_embeddings''': 7,
            '''additional_embeddings''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub (self : Tuple ):
        '''simple docstring'''
        model , loading_info = PriorTransformer.from_pretrained(
            '''hf-internal-testing/prior-dummy''' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(torch_device )
        hidden_states = model(**self.dummy_input )[0]
        assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature (self : Optional[Any] ):
        '''simple docstring'''
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ['''hidden_states''', '''timestep''']
        self.assertListEqual(arg_names[:2] , expected_arg_names )
    def test_output_pretrained (self : Dict ):
        '''simple docstring'''
        model = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' )
        model = model.to(torch_device )
        if hasattr(model , '''set_default_attn_processor''' ):
            model.set_default_attn_processor()
        input_dict = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input_dict )[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice )
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-2 ) )
@slow
class PriorTransformerIntegrationTests (unittest.TestCase ):
'''simple docstring'''
    def get_dummy_seed_input (self : List[str] , batch_size : int=1 , embedding_dim : int=768 , num_embeddings : int=77 , seed : int=0 ):
        '''simple docstring'''
        torch.manual_seed(seed )
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def tearDown (self : Tuple ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
            [37, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
            # fmt: on
        ] )
    def test_kandinsky_prior (self : Dict , seed : int , expected_slice : Dict ):
        '''simple docstring'''
        model = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' , subfolder='''prior''' )
        model.to(torch_device )
        input_dict = self.get_dummy_seed_input(seed=seed )
        with torch.no_grad():
            sample = model(**input_dict )[0]
        assert list(sample.shape ) == [1, 768]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice )
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1E-3 )
| 460 |
'''simple docstring'''
def is_arithmetic_series (series ) -> bool:
    """simple docstring"""
    if not isinstance(series , list ):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
    if len(series ) == 0:
        raise ValueError('''Input list must be a non empty list''' )
    if len(series ) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean (series ) -> float:
    """simple docstring"""
    if not isinstance(series , list ):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
    if len(series ) == 0:
        raise ValueError('''Input list must be a non empty list''' )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
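# Example calls (function names as fixed above):
# is_arithmetic_series([2, 4, 6]) -> True
# is_arithmetic_series([2, 4, 7]) -> False
# arithmetic_mean([2, 4, 6]) -> 4.0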
if __name__ == "__main__":
import doctest
doctest.testmod()
| 460 | 1 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
a_ = "<<<<<<< This should probably be modified because it mentions: "
a_ = "=======\n>>>>>>>\n"
a_ = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
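# The (pattern, replacement) pairs above are applied with re.sub, e.g. (an
# illustrative call, not part of the original file):
#   re.sub(r"tfds\.features\.Text\(\)", r"datasets.Value('string')",
#          "text = tfds.features.Text()")
#   # -> "text = datasets.Value('string')"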
def convert_command_factory( args ):
    """simple docstring"""
    return ConvertCommand(args.tfds_path, args.datasets_directory )
class ConvertCommand ( BaseDatasetsCLICommand ):
    @staticmethod
    def register_subcommand( parser):
        train_parser = parser.add_parser(
            "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
        train_parser.add_argument(
            "--tfds_path" , type=str , required=True , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
        train_parser.add_argument(
            "--datasets_directory" , type=str , required=True , help="Path to the HuggingFace Datasets folder.")
        train_parser.set_defaults(func=convert_command_factory)
    def __init__( self , tfds_path , datasets_directory , *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run( self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(F'Converting datasets from {abs_tfds_path} to {abs_datasets_path}')
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(F'Looking at file {f_name}')
            input_file = os.path.join(abs_tfds_path , f_name)
            output_file = os.path.join(abs_datasets_path , f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file , encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger" , "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line , TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line)
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F'Error converting {out_line.strip()}')
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
snake_case_ : Optional[int] = f_name.replace(".py" , "")
snake_case_ : List[Any] = os.path.join(UpperCAmelCase_ , UpperCAmelCase_)
snake_case_ : List[Any] = os.path.join(UpperCAmelCase_ , UpperCAmelCase_)
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_)
self._logger.info(F'Adding directory {output_dir}')
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
else:
# Utilities will be moved at the end
utils_files.append(UpperCAmelCase_)
if needs_manual_update:
with_manual_update.append(UpperCAmelCase_)
with open(UpperCAmelCase_ , "w" , encoding="utf-8") as f:
f.writelines(UpperCAmelCase_)
self._logger.info(F'Converted in {output_file}')
for utils_file in utils_files:
try:
snake_case_ : List[Any] = os.path.basename(UpperCAmelCase_)
snake_case_ : Optional[int] = imports_to_builder_map[f_name.replace(".py" , "")]
self._logger.info(F'Moving {dest_folder} to {utils_file}')
shutil.copy(UpperCAmelCase_ , UpperCAmelCase_)
except KeyError:
self._logger.error(F'Cannot find destination folder for {utils_file}. Please copy manually.')
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.')
| 711 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
a_ = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = UNet2DModel
    main_input_name = """sample"""
    @property
    def dummy_input ( self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape ( self):
        return (3, 32, 32)
    @property
    def output_shape ( self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common ( self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = UNet2DModel
    main_input_name = """sample"""
    @property
    def dummy_input ( self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape ( self):
        return (4, 32, 32)
    @property
    def output_shape ( self):
        return (4, 32, 32)
    def prepare_init_args_and_inputs_for_common ( self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub ( self):
        model , loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]) , 0)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate ( self):
        model , _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results ( self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate , _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()
        noise = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0) , )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        arr_accelerate = model_accelerate(noise , time_step)["sample"]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        model_normal_load , _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update" , output_loading_info=True , low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise , time_step)["sample"]
        assert torch_all_close(arr_accelerate , arr_normal_load , rtol=1E-3)
    def test_output_pretrained ( self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)
        noise = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0) , )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        with torch.no_grad():
            output = model(noise , time_step).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-3))
class NCSNppModelTests ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = UNet2DModel
    main_input_name = """sample"""
    @property
    def dummy_input ( self , sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        # dtype reconstructed from the obfuscated "intaa"; int32 is an assumption
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32 , device=torch_device)
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape ( self):
        return (3, 32, 32)
    @property
    def output_shape ( self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common ( self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1E-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
@slow
    def test_from_pretrained_hub ( self):
        model , loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]) , 0)
        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (2_56, 2_56)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)
        assert image is not None, "Make sure output is not None"
@slow
    def test_output_pretrained_ve_large ( self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (2_56, 2_56)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1E-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise , time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-2))
    def test_output_pretrained_ve_mid ( self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1E-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise , time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-2))
    def test_forward_with_norm_groups ( self):
        # not required for this model
        pass
| 92 | 0 |
import argparse
UpperCAmelCase : Any = "docs/source/_static/js/custom.js"
def update_custom_js( version ):
    '''simple docstring'''
    with open(JS_PATH , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("""const stableVersion =""" ):
        index += 1
    lines[index] = f'const stableVersion = \"v{version}\"\n'
    # Then update the dictionary
    while not lines[index].startswith("""const versionMapping = {""" ):
        index += 1
    # We go until the end
    while not lines[index].startswith("""}""" ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'    \"v{version}\": \"v{version}\",\n'
    with open(JS_PATH , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.writelines(lines )
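# For example, with version="4.30.0" the line
#   const stableVersion = "v4.29.2"
# becomes
#   const stableVersion = "v4.30.0"
# and '"v4.30.0": "v4.30.0",' is appended inside versionMapping (the concrete
# version strings here are illustrative).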
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
update_custom_js(args.version)
| 457 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class __magic_name__ ( SequenceFeatureExtractor ):
    model_input_names = ["input_features", "attention_mask"]
    def __init__( self , feature_size=8_0 , sampling_rate=1_6_0_0_0 , num_mel_bins=8_0 , padding_value=0.0 , do_ceptral_normalize=True , normalize_means=True , normalize_vars=True , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features( self , waveform , ) -> np.ndarray:
        '''simple docstring'''
        waveform = waveform * (2**1_5)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn( x , input_length , normalize_means = True , normalize_vars = True , padding_value = 0.0 , ) -> np.ndarray:
        '''simple docstring'''
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x , mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x , std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
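    # Illustration of the CMVN above on a toy array (easy to verify by hand;
    # this comment is explanatory, not part of the original file):
    #   x = np.array([[1.0], [3.0]]);  mean = 2.0;  std after centering = 1.0
    #   utterance_cmvn(x, input_length=2) -> [[-1.0], [1.0]]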
    def normalize( self , input_features , attention_mask = None) -> List[np.ndarray]:
        '''simple docstring'''
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x , n , self.normalize_means , self.normalize_vars , self.padding_value)
            for x, n in zip(input_features , lengths)
        ]
def __call__( self , snake_case , snake_case = False , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
_UpperCAmelCase : Optional[int] =isinstance(snake_case , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
_UpperCAmelCase : List[Any] =is_batched_numpy or (
isinstance(snake_case , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
_UpperCAmelCase : int =[np.asarray(snake_case , dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case , np.ndarray):
_UpperCAmelCase : Tuple =np.asarray(snake_case , dtype=np.floataa)
elif isinstance(snake_case , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
_UpperCAmelCase : int =raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
_UpperCAmelCase : Dict =[raw_speech]
# extract fbank features
_UpperCAmelCase : Optional[Any] =[self._extract_fbank_features(snake_case) for waveform in raw_speech]
# convert into correct format for padding
_UpperCAmelCase : List[str] =BatchFeature({'input_features': features})
_UpperCAmelCase : Any =self.pad(
snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , )
# make sure list is in array format
_UpperCAmelCase : Dict =padded_inputs.get('input_features')
if isinstance(input_features[0] , snake_case):
_UpperCAmelCase : Any =[np.asarray(snake_case , dtype=np.floataa) for feature in input_features]
_UpperCAmelCase : int =padded_inputs.get('attention_mask')
if attention_mask is not None:
_UpperCAmelCase : Tuple =[np.asarray(snake_case , dtype=np.intaa) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
_UpperCAmelCase : Optional[Any] =(
np.array(snake_case , dtype=np.intaa)
if self._get_padding_strategies(snake_case , max_length=snake_case) is not PaddingStrategy.DO_NOT_PAD
else None
)
_UpperCAmelCase : Optional[int] =self.normalize(
padded_inputs['input_features'] , attention_mask=snake_case)
if return_tensors is not None:
_UpperCAmelCase : List[Any] =padded_inputs.convert_to_tensors(snake_case)
return padded_inputs
| 446 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :Optional[int] = '''▁'''
__SCREAMING_SNAKE_CASE :Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__SCREAMING_SNAKE_CASE :List[Any] = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
__SCREAMING_SNAKE_CASE :Optional[Any] = {
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
__SCREAMING_SNAKE_CASE :Any = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : List[str] = VOCAB_FILES_NAMES
_lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : List[Any] = ["""input_ids""", """attention_mask"""]
_lowerCamelCase : List[int] = []
_lowerCamelCase : List[int] = []
def __init__( self : Optional[Any] , snake_case_ : List[str] , snake_case_ : str="<s>" , snake_case_ : List[Any]="</s>" , snake_case_ : str="</s>" , snake_case_ : List[str]="<s>" , snake_case_ : Tuple="<unk>" , snake_case_ : Tuple="<pad>" , snake_case_ : Dict="<mask>" , snake_case_ : List[str]=None , snake_case_ : str=None , snake_case_ : Dict=None , snake_case_ : Optional[Dict[str, Any]] = None , snake_case_ : Dict=None , snake_case_ : List[Any]=False , **snake_case_ : int , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_UpperCAmelCase = legacy_behaviour
super().__init__(
bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , tokenizer_file=snake_case_ , src_lang=snake_case_ , tgt_lang=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=snake_case_ , **snake_case_ , )
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case_ ) )
_UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCAmelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase = 1
_UpperCAmelCase = len(self.sp_model )
_UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(snake_case_ )
}
_UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
_UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_UpperCAmelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_UpperCAmelCase = src_lang if src_lang is not None else "eng_Latn"
_UpperCAmelCase = self.lang_code_to_id[self._src_lang]
_UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[Any] ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , snake_case_ : Any ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowercase ( self : Dict ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowercase ( self : Optional[Any] ):
return self._src_lang
@src_lang.setter
def lowercase ( self : Union[str, Any] , snake_case_ : str ):
_UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowercase ( self : List[Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None , snake_case_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
_UpperCAmelCase = [1] * len(self.prefix_tokens )
_UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def lowercase ( self : List[str] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase ( self : List[Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase ( self : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : str , snake_case_ : Optional[str] , snake_case_ : Optional[str] , **snake_case_ : List[str] ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
_UpperCAmelCase = src_lang
_UpperCAmelCase = self(snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , **snake_case_ )
_UpperCAmelCase = self.convert_tokens_to_ids(snake_case_ )
_UpperCAmelCase = tgt_lang_id
return inputs
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase ( self : Union[str, Any] , snake_case_ : str ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def lowercase ( self : str , snake_case_ : List[str] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase = self.sp_model.PieceToId(snake_case_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase ( self : Optional[int] , snake_case_ : Union[str, Any] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase ( self : List[str] , snake_case_ : List[Any] ):
_UpperCAmelCase = "".join(snake_case_ ).replace(snake_case_ , " " ).strip()
return out_string
def lowercase ( self : Dict , snake_case_ : str , snake_case_ : Optional[str] = None ):
if not os.path.isdir(snake_case_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase = os.path.join(
snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , "wb" ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
def lowercase ( self : Optional[int] , snake_case_ : List[str] , snake_case_ : str = "eng_Latn" , snake_case_ : Optional[List[str]] = None , snake_case_ : str = "fra_Latn" , **snake_case_ : int , ):
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def lowercase ( self : Tuple ):
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase ( self : str ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase ( self : str , snake_case_ : List[Any] ):
_UpperCAmelCase = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCAmelCase = [self.cur_lang_code]
_UpperCAmelCase = [self.eos_token_id]
def lowercase ( self : List[str] , snake_case_ : str ):
_UpperCAmelCase = self.lang_code_to_id[lang]
if self.legacy_behaviour:
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCAmelCase = [self.cur_lang_code]
_UpperCAmelCase = [self.eos_token_id]
| 712 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A_ :
@staticmethod
def lowercase ( *snake_case_ : Optional[int] , **snake_case_ : str ):
pass
@is_pipeline_test
@require_vision
class A_ ( unittest.TestCase ):
@require_torch
def lowercase ( self : List[Any] ):
_UpperCAmelCase = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_UpperCAmelCase = image_classifier(snake_case_ , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(snake_case_ ) , [
[{"score": 0.3_3_3, "label": "a"}, {"score": 0.3_3_3, "label": "b"}, {"score": 0.3_3_3, "label": "c"}],
[{"score": 0.3_3_3, "label": "a"}, {"score": 0.3_3_3, "label": "c"}, {"score": 0.3_3_3, "label": "b"}],
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case_ ) , [
[
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
],
[
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
],
[
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
],
[
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
],
[
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
],
] , )
@require_tf
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_UpperCAmelCase = image_classifier(snake_case_ , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(snake_case_ ) , [{"score": 0.3_3_3, "label": "a"}, {"score": 0.3_3_3, "label": "b"}, {"score": 0.3_3_3, "label": "c"}] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case_ ) , [
[
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
],
[
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
],
[
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
],
[
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
],
[
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
{"score": 0.3_3_3, "label": ANY(snake_case_ )},
],
] , )
@slow
@require_torch
def lowercase ( self : Dict ):
_UpperCAmelCase = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_UpperCAmelCase = image_classifier(snake_case_ , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(snake_case_ ) , [
{"score": 0.5_1_1, "label": "remote"},
{"score": 0.4_8_5, "label": "cat"},
{"score": 0.0_0_4, "label": "plane"},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case_ ) , [
[
{"score": 0.5_1_1, "label": "remote"},
{"score": 0.4_8_5, "label": "cat"},
{"score": 0.0_0_4, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def lowercase ( self : Tuple ):
_UpperCAmelCase = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_UpperCAmelCase = image_classifier(snake_case_ , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(snake_case_ ) , [
{"score": 0.5_1_1, "label": "remote"},
{"score": 0.4_8_5, "label": "cat"},
{"score": 0.0_0_4, "label": "plane"},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case_ ) , [
[
{"score": 0.5_1_1, "label": "remote"},
{"score": 0.4_8_5, "label": "cat"},
{"score": 0.0_0_4, "label": "plane"},
],
]
* 5 , )
| 119 | 0 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def _UpperCAmelCase ( *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Any):
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = np.array(__lowerCAmelCase )
lowercase_ = npimg.shape
return {"hash": hashimage(__lowerCAmelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
lowercase__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = MaskGenerationPipeline(model=snake_case__ , image_processor=snake_case__)
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""")
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
pass
@slow
@require_torch
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""")
lowercase_ = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=2_5_6)
# Shortening by hashing
lowercase_ = []
for i, o in enumerate(outputs["""masks"""]):
new_outupt += [{"mask": mask_to_test_readable(snake_case__), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(snake_case__ , decimals=4) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.8_999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.8_986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.8_984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.8_873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = """facebook/sam-vit-huge"""
lowercase_ = pipeline("""mask-generation""" , model=snake_case__)
lowercase_ = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=2_5_6)
# Shortening by hashing
lowercase_ = []
for i, o in enumerate(outputs["""masks"""]):
new_outupt += [{"mask": mask_to_test_readable(snake_case__), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(snake_case__ , decimals=4) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_053},
] , )
| 567 |
"""simple docstring"""
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = [0] * len(lowerCAmelCase )
UpperCAmelCase = []
UpperCAmelCase = [1] * len(lowerCAmelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowerCAmelCase ) ):
if indegree[i] == 0:
queue.append(lowerCAmelCase )
while queue:
UpperCAmelCase = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
UpperCAmelCase = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(lowerCAmelCase )
print(max(lowerCAmelCase ) )
# Adjacency list of Graph
lowerCAmelCase_ : str = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 673 | 0 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
_lowerCAmelCase = float('''nan''')
class lowerCAmelCase_:
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : List[str] = sys.stdout
lowerCAmelCase__ : List[Any] = open(__UpperCAmelCase ,"""a""" )
def __getattr__( self ,__UpperCAmelCase ) -> Tuple:
return getattr(self.stdout ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
self.stdout.write(__UpperCAmelCase )
# strip tqdm codes
self.file.write(re.sub(R"""^.*\r""" ,"""""" ,__UpperCAmelCase ,0 ,re.M ) )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase=80 , UpperCamelCase=False ):
"""simple docstring"""
lowerCAmelCase__ : int = []
# deal with critical env vars
lowerCAmelCase__ : Any = ["""CUDA_VISIBLE_DEVICES"""]
for key in env_keys:
lowerCAmelCase__ : List[Any] = os.environ.get(UpperCamelCase , UpperCamelCase )
if val is not None:
cmd.append(f"""{key}={val}""" )
# python executable (not always needed if the script is executable)
lowerCAmelCase__ : int = sys.executable if full_python_path else sys.executable.split("""/""" )[-1]
cmd.append(UpperCamelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : Optional[Any] = """"""
while len(UpperCamelCase ) > 0:
current_line += f"""{cmd.pop(0 )} """
if len(UpperCamelCase ) == 0 or len(UpperCamelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = """"""
return "\\\n".join(UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = re.sub(R"""[\\\n]+""" , """ """ , args.base_cmd )
# remove --output_dir if any and set our own
lowerCAmelCase__ : Dict = re.sub("""--output_dir\s+[^\s]+""" , """""" , args.base_cmd )
args.base_cmd += f""" --output_dir {output_dir}"""
# ensure we have --overwrite_output_dir
lowerCAmelCase__ : List[Any] = re.sub("""--overwrite_output_dir\s+""" , """""" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
lowerCAmelCase__ : Union[str, Any] = subprocess.run(UpperCamelCase , capture_output=UpperCamelCase , text=UpperCamelCase )
if verbose:
print("""STDOUT""" , result.stdout )
print("""STDERR""" , result.stderr )
# save the streams
lowerCAmelCase__ : str = variation.replace(""" """ , """-""" )
with open(Path(UpperCamelCase ) / f"""log.{prefix}.stdout.txt""" , """w""" ) as f:
f.write(result.stdout )
with open(Path(UpperCamelCase ) / f"""log.{prefix}.stderr.txt""" , """w""" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("""failed""" )
return {target_metric_key: nan}
with io.open(f"""{output_dir}/all_results.json""" , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase__ : str = json.load(UpperCamelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
"""simple docstring"""
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Tuple = f"""{id}: {variation:<{longest_variation_len}}"""
lowerCAmelCase__ : Optional[int] = f"""{preamble}: """
lowerCAmelCase__ : str = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(UpperCamelCase ) , desc=UpperCamelCase , leave=UpperCamelCase ):
lowerCAmelCase__ : str = process_run_single(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : List[str] = single_run_metrics[target_metric_key]
if not math.isnan(UpperCamelCase ):
metrics.append(UpperCamelCase )
results.append(UpperCamelCase )
outcome += "✓"
else:
outcome += "✘"
lowerCAmelCase__ : int = f"""\33[2K\r{outcome}"""
if len(UpperCamelCase ) > 0:
lowerCAmelCase__ : int = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
lowerCAmelCase__ : Any = round(mean_metrics[target_metric_key] , 2 )
lowerCAmelCase__ : Tuple = f"""{outcome} {mean_target}"""
if len(UpperCamelCase ) > 1:
results_str += f""" {tuple(round(UpperCamelCase , 2 ) for x in results )}"""
print(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = variation
return mean_metrics
else:
print(UpperCamelCase )
return {variation_key: variation, target_metric_key: nan}
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = torch.cuda.get_device_properties(torch.device("""cuda""" ) )
return f"""
Datetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = pd.DataFrame(UpperCamelCase )
lowerCAmelCase__ : List[Any] = """variation"""
lowerCAmelCase__ : Tuple = """diff_%"""
lowerCAmelCase__ : Any = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
lowerCAmelCase__ : Optional[int] = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(UpperCamelCase ):
# as a fallback, use the minimal value as the sentinel
lowerCAmelCase__ : Dict = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(UpperCamelCase ):
lowerCAmelCase__ : Dict = df.apply(
lambda UpperCamelCase : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="""columns""" , )
# re-order columns
lowerCAmelCase__ : Union[str, Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys]
lowerCAmelCase__ : List[str] = df.reindex(UpperCamelCase , axis="""columns""" ) # reorder cols
# capitalize
lowerCAmelCase__ : Optional[Any] = df.rename(str.capitalize , axis="""columns""" )
# make the cols as narrow as possible
lowerCAmelCase__ : Any = df.rename(lambda UpperCamelCase : c.replace("""_""" , """<br>""" ) , axis="""columns""" )
lowerCAmelCase__ : int = df.rename(lambda UpperCamelCase : c.replace("""_""" , """\n""" ) , axis="""columns""" )
lowerCAmelCase__ : Optional[Any] = ["""""", """Copy between the cut-here-lines and paste as is to github or a forum"""]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase , floatfmt=""".2f""" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase , floatfmt=""".2f""" )]
print("""\n\n""".join(UpperCamelCase ) )
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--base-cmd""" , default=UpperCamelCase , type=UpperCamelCase , required=UpperCamelCase , help="""Base cmd""" , )
parser.add_argument(
"""--variations""" , default=UpperCamelCase , type=UpperCamelCase , nargs="""+""" , required=UpperCamelCase , help="""Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'""" , )
parser.add_argument(
"""--base-variation""" , default=UpperCamelCase , type=UpperCamelCase , help="""Baseline variation to compare to. if None the minimal target value will be used to compare against""" , )
parser.add_argument(
"""--target-metric-key""" , default=UpperCamelCase , type=UpperCamelCase , required=UpperCamelCase , help="""Target metric key in output_dir/all_results.json, e.g., train_samples_per_second""" , )
parser.add_argument(
"""--report-metric-keys""" , default="""""" , type=UpperCamelCase , help="""Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples""" , )
parser.add_argument(
"""--repeat-times""" , default=1 , type=UpperCamelCase , help="""How many times to re-run each variation - an average will be reported""" , )
parser.add_argument(
"""--output_dir""" , default="""output_benchmark""" , type=UpperCamelCase , help="""The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked""" , )
parser.add_argument(
"""--verbose""" , default=UpperCamelCase , action="""store_true""" , help="""Whether to show the outputs of each run or just the benchmark progress""" , )
lowerCAmelCase__ : Tuple = parser.parse_args()
lowerCAmelCase__ : Optional[Any] = args.output_dir
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
lowerCAmelCase__ : Optional[int] = get_base_command(UpperCamelCase , UpperCamelCase )
# split each dimension into its --foo variations
lowerCAmelCase__ : Optional[int] = [list(map(str.strip , re.split(R"""\|""" , UpperCamelCase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
lowerCAmelCase__ : Any = list(map(str.strip , map(""" """.join , itertools.product(*UpperCamelCase ) ) ) )
lowerCAmelCase__ : Dict = max(len(UpperCamelCase ) for x in variations )
# split wanted keys
lowerCAmelCase__ : List[str] = args.report_metric_keys.split()
# capture prints into a log file for convenience
lowerCAmelCase__ : Union[str, Any] = f"""benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"""
print(f"""\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt""" )
print(f"""and this script's output is also piped into {report_fn}""" )
lowerCAmelCase__ : List[str] = Tee(UpperCamelCase )
print(f"""\n*** Running {len(UpperCamelCase )} benchmarks:""" )
print(f"""Base command: {' '.join(UpperCamelCase )}""" )
lowerCAmelCase__ : Union[str, Any] = """variation"""
lowerCAmelCase__ : Optional[Any] = []
for id, variation in enumerate(tqdm(UpperCamelCase , desc="""Total completion: """ , leave=UpperCamelCase ) ):
lowerCAmelCase__ : List[str] = base_cmd + variation.split()
results.append(
process_run(
id + 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , args.target_metric_key , UpperCamelCase , args.repeat_times , UpperCamelCase , args.verbose , ) )
process_results(UpperCamelCase , args.target_metric_key , UpperCamelCase , args.base_variation , UpperCamelCase )
if __name__ == "__main__":
main()
| 160 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_lowerCAmelCase = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 160 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
A_ : Union[str, Any] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
A_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 456 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCamelCase : List[Any] = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 430 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__ = TextaTextGenerationPipeline(model=__lowerCamelCase , tokenizer=__lowerCamelCase )
return generator, ["Something to write", "Something else"]
def lowercase_ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = generator('''Something there''' )
self.assertEqual(__lowerCamelCase , [{'''generated_text''': ANY(__lowerCamelCase )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
SCREAMING_SNAKE_CASE__ = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__lowerCamelCase )
self.assertEqual(
__lowerCamelCase , [
[{'''generated_text''': ANY(__lowerCamelCase )}, {'''generated_text''': ANY(__lowerCamelCase )}],
[{'''generated_text''': ANY(__lowerCamelCase )}, {'''generated_text''': ANY(__lowerCamelCase )}],
] , )
SCREAMING_SNAKE_CASE__ = generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__lowerCamelCase )
self.assertEqual(
__lowerCamelCase , [
[{'''generated_text''': ANY(__lowerCamelCase )}, {'''generated_text''': ANY(__lowerCamelCase )}],
[{'''generated_text''': ANY(__lowerCamelCase )}, {'''generated_text''': ANY(__lowerCamelCase )}],
] , )
with self.assertRaises(__lowerCamelCase ):
generator(4 )
@require_torch
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE__ = generator('''Something there''' , do_sample=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , [{'''generated_text''': ''''''}] )
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = generator(
'''Something there''' , num_return_sequences=__lowerCamelCase , num_beams=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = generator('''This is a test''' , do_sample=__lowerCamelCase , num_return_sequences=2 , return_tensors=__lowerCamelCase )
self.assertEqual(
__lowerCamelCase , [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] , )
SCREAMING_SNAKE_CASE__ = generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE__ = '''<pad>'''
SCREAMING_SNAKE_CASE__ = generator(
['''This is a test''', '''This is a second test'''] , do_sample=__lowerCamelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__lowerCamelCase , )
self.assertEqual(
__lowerCamelCase , [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] , )
@require_tf
def lowercase_ ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE__ = generator('''Something there''' , do_sample=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , [{'''generated_text''': ''''''}] )
| 719 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : Dict = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
_SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 472 | 0 |
"""Breadth-first search shortest path on an unweighted, undirected graph."""
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Traverse the graph from the source vertex, filling in the parent map."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source to ``target_vertex``, e.g. "G->C->A"."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))  # raises ValueError: "Foo" is not in the graph
| 38 |
"""Tests for the Longformer tokenizers."""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 320 | 0 |
"""Convert ViT MSN checkpoints from the original repository: https://github.com/facebookresearch/msn"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
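# Note: the fused `qkv` projection has shape (3 * hidden_size, hidden_size); the slices above
# peel off the query, key and value sub-matrices in that order before loading them into HF names.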
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # the projection head is only used during self-supervised pre-training, not for downstream tasks
    ignore_keys = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
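# Example invocation (the script filename is illustrative; the URL is the default above):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small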
| 709 |
"""Abstract base class for transformers CLI subcommands."""
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
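# Minimal sketch of a concrete subcommand (hypothetical, for illustration only). As in the
# real CLI, `parser` is expected to be the subparsers action; the entry point later calls
# `args.func(args).run()` on the command returned by the factory wired via `set_defaults`.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        echo_parser = parser.add_parser("echo", help="Print the given text")
        echo_parser.add_argument("text", type=str)
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text: str):
        self.text = text

    def run(self):
        print(self.text)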
| 542 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """The output of [`UNet1DModel`]: a `sample` tensor of shape `(batch_size, num_channels, sample_size)`."""

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: Tuple[str] = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(up_block_types) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
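# Rough smoke test (a sketch only -- shapes untested; the defaults above expect inputs of shape
# (batch, in_channels=2, length), where length must survive the down/up sampling factors):
#
#     model = UNet1DModel()
#     sample = torch.randn(1, 2, 2048)
#     out = model(sample, timestep=10).sample  # same shape as `sample`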
| 289 |
"""simple docstring"""
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 289 | 1 |
"""simple docstring"""
def UpperCamelCase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
lowerCAmelCase_ : Any = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
lowerCAmelCase_ : Dict = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
lowerCAmelCase_ : Tuple = max(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(__UpperCamelCase ) , b_binary.zfill(__UpperCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
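# Worked examples (hand-checked, shown here since the function carries no doctests):
#   binary_and(5, 3)   -> "0b001"     (101 & 011)
#   binary_and(25, 32) -> "0b000000"  (011001 & 100000 share no set bits)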
| 711 |
"""simple docstring"""
def UpperCamelCase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Any ) -> int:
"""simple docstring"""
lowerCAmelCase_ : Tuple = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def UpperCamelCase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase_ : List[Any] = 0
while b > 0:
if b & 1:
lowerCAmelCase_ : int = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
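# Worked examples (hand-checked):
#   binary_multiply(2, 3)        -> 6
#   binary_mod_multiply(2, 3, 5) -> 1   # (2 * 3) % 5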
| 317 | 0 |
def palindromic_string(input_string: str) -> str:
    """Manacher's algorithm: find the longest palindromic substring in linear time."""
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_input_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # l and r store the start and end of the previously furthest-ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_input_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # if this palindrome ends after the previously explored end (that is r),
        # update l and r to the bounds of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
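# Hand-checked examples:
#   palindromic_string("abbbaba") -> "abbba"
#   palindromic_string("ababa")   -> "ababa"
# The expansion is Manacher-style: `r` only ever moves right, so total work is O(n)
# rather than the naive O(n^2).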
| 469 | """simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process (non-preemptive shortest job first)."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # While not all processes are completed: any process whose arrival time has passed
    # and which still has remaining execution time is put into ready_process, and the
    # shortest job in ready_process (target_process) is run to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1

    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turnaround time of each process: burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
| 644 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__magic_name__ = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )
    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_unchanged_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 391 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
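# Example invocation (paths are illustrative):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert_ckpt \
#       --config_file ./lxmert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin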
| 391 | 1 |
"""simple docstring"""
def a_ ( lowercase__ :Any, lowercase__ :Optional[Any] ):
__lowerCamelCase = int(lowercase_ )
# Initialize Result
__lowerCamelCase = []
# Traverse through all denomination
for denomination in reversed(lowercase_ ):
# Find denominations
while int(lowercase_ ) >= int(lowercase_ ):
total_value -= int(lowercase_ )
answer.append(lowercase_ ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
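# Worked example (hand-checked): find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2].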
| 281 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 12 | 0 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]
    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)
    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )
    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
| 709 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 148 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" MBART tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
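

# Typical use (a sketch; downloading the pretrained files requires network access):
#
#   tok = MBartTokenizerFast.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
#   # input_ids end with [..., eos, en_XX] because of the suffix tokens set above.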
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: best total value using items from `index` onwards."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
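
# A quick worked example (hand-checked): with weights [1, 2, 4, 5], values
# [5, 4, 8, 6] and capacity 5, the best fill is the items of weight 1 and 4:
#
#   knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)  # -> 5 + 8 = 13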
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import Optional

import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """GPT-2 based text decoder that conditions generation on a (CLIP) prefix embedding."""

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,  # Start of GPT2 config args
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate one caption per prefix embedding via beam search."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        input_ids=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
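

# A shape-level smoke test (random weights; the sizes below are arbitrary):
#
#   decoder = UniDiffuserTextDecoder(prefix_length=1, prefix_inner_dim=768, prefix_hidden_dim=64)
#   input_ids = torch.randint(0, 50257, (2, 5))
#   prefix = torch.randn(2, 1, 768)
#   out, hidden = decoder(input_ids, prefix)  # GPT-2 LM output plus the encoded prefix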
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file=None, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build the first row of the highway: -1 marks an empty cell."""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """Number of empty cells in front of the car at `car_index` (wrapping around)."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """One Nagel-Schreckenberg step: accelerate, brake to the gap, randomly slow down."""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """Run `number_of_update` steps, moving each car forward by its updated speed."""
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway
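
# A minimal deterministic run (probability=0.0 disables the random braking step):
#
#   highway = construct_highway(number_of_cells=6, frequency=3, initial_speed=0)
#   history = simulate(highway, number_of_update=2, probability=0.0, max_speed=2)
#   # history[0] is the initial road; history[1] and history[2] are the next two steps.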
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """Base class for speech/sequence feature extractors that pad 1-D inputs."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, convert it to a dict of lists so this method
        # can be used as a collate_fn in a PyTorch DataLoader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
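

# A minimal sketch of the padding behaviour using a toy subclass (the subclass and
# its `model_input_names` below are illustrative, not a real extractor):
#
#   class ToyFeatureExtractor(SequenceFeatureExtractor):
#       model_input_names = ["input_values"]
#
#   fe = ToyFeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
#   batch = fe.pad(
#       {"input_values": [np.array([0.1, 0.2, 0.3]), np.array([0.4])]},
#       padding="longest",
#       return_attention_mask=True,
#   )
#   # batch["input_values"]   -> [[0.1, 0.2, 0.3], [0.4, 0.0, 0.0]]
#   # batch["attention_mask"] -> [[1, 1, 1], [1, 0, 0]]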
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
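
# Note: thanks to the try/except guards above, `import diffusers` still succeeds when
# optional backends (torch, flax, onnxruntime, note_seq, ...) are missing; the affected
# names are replaced by dummy objects that raise an informative error on first use.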
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import copy
import inspect
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        VideoMAEForPreTraining,
        VideoMAEForVideoClassification,
        VideoMAEModel,
    )
    from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import VideoMAEImageProcessor


class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass


# We will verify our results on a video of eating spaghetti
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Sum of the numbers below `limit` palindromic in base 10 and base 2 (Project Euler 36)."""
    total = 0

    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
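
# Sanity check (hand-verified): 585 reads the same in both bases --
# 585 == 0b1001001001 -- so it is one of the numbers counted by `solution`.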
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = (
            "\nclass FakeClass(metaclass=DummyObject):\n"
            "    _backends = 'torch'\n\n"
            "    def __init__(self, *args, **kwargs):\n"
            "        requires_backends(self, 'torch')\n\n"
            "    @classmethod\n"
            "    def from_config(cls, *args, **kwargs):\n"
            "        requires_backends(cls, 'torch')\n\n"
            "    @classmethod\n"
            "    def from_pretrained(cls, *args, **kwargs):\n"
            "        requires_backends(cls, 'torch')\n"
        )
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = (
            "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
            "from ..utils import DummyObject, requires_backends\n\n\n"
            "CONSTANT = None\n\n\n"
            "def function(*args, **kwargs):\n"
            '    requires_backends(function, ["torch"])\n\n\n'
            "class FakeClass(metaclass=DummyObject):\n"
            '    _backends = ["torch"]\n\n'
            "    def __init__(self, *args, **kwargs):\n"
            '        requires_backends(self, ["torch"])\n\n'
            "    @classmethod\n"
            "    def from_config(cls, *args, **kwargs):\n"
            '        requires_backends(cls, ["torch"])\n\n'
            "    @classmethod\n"
            "    def from_pretrained(cls, *args, **kwargs):\n"
            '        requires_backends(cls, ["torch"])\n'
        )
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Clean one model-doc TOC section: merge duplicate entries and sort by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
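

# A worked example (hand-checked against the logic above): duplicate `local` entries
# with a single title collapse into one, and the result is sorted by title:
#
#   clean_model_doc_toc([
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/albert", "title": "ALBERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#   ])
#   # -> [{"local": "model_doc/albert", "title": "ALBERT"},
#   #     {"local": "model_doc/bert", "title": "BERT"}]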
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
_KWARGS_DESCRIPTION = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=[\"About 95 species are currently accepted .\"]\n    >>> predictions=[\"About 95 you now get in .\"]\n    >>> references=[[\"About 95 species are currently known .\"]]\n    >>> wiki_split = datasets.load_metric(\"wiki_split\")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
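# The metric combines three scores: SARI (n-gram keep/delete/add F-scores),
# corpus-level BLEU computed with sacrebleu, and exact match after normalization.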
def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))


def compute_exact(a_gold, a_pred):
    """Return 1 if the normalized prediction matches the normalized gold answer, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    """Exact-match accuracy, in percent, of predictions against lists of references."""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    s3grams = []
    s4grams = []
    c2grams = []
    c3grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2grams.append(r1grams[i] + " " + r1grams[i + 1])
            if i < len(r1grams) - 2:
                r3grams.append(r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2])
            if i < len(r1grams) - 3:
                r4grams.append(r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3])
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2grams.append(s1grams[i] + " " + s1grams[i + 1])
        if i < len(s1grams) - 2:
            s3grams.append(s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2])
        if i < len(s1grams) - 3:
            s4grams.append(s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3])

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2grams.append(c1grams[i] + " " + c1grams[i + 1])
        if i < len(c1grams) - 2:
            c3grams.append(c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2])
        if i < len(c1grams) - 3:
            c4grams.append(c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3])

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset to allow splitting on
    # spaces; it is applied for every tokenizer choice for consistency.
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
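# Usage sketch (mirrors the docstring example above):
#   wiki_split = datasets.load_metric("wiki_split")
#   wiki_split.compute(sources=sources, predictions=predictions, references=references)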
| 267 |
"""Chinese Remainder Theorem helpers built on the extended Euclidean algorithm."""
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Returns (x, y) such that a * x + b * y = gcd(a, b).

    >>> extended_euclid(10, 6)
    (-1, 2)
    >>> extended_euclid(7, 5)
    (-2, 3)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Finds x such that x = r1 (mod n1) and x = r2 (mod n2) for coprime n1, n2.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Returns the multiplicative inverse of a modulo n.

    >>> invert_modulo(2, 5)
    3
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as above, computed with modular inverses.

    >>> chinese_remainder_theorem2(5, 1, 7, 3)
    31
    """
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
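# Worked example: chinese_remainder_theorem(5, 1, 7, 4) == 11,
# since 11 % 5 == 1 and 11 % 7 == 4.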
| 267 | 1 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n',
            '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low',
            'er', '\u0120lowest', '\u0120newer', '\u0120wider', '[UNK]',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '[UNK]'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('Hello', 'World')
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['token_type_ids'], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/deberta-base')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('microsoft/deberta-base')

            sequences = [
                'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
                'ALBERT incorporates two parameter reduction techniques',
                'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
                ' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
                ' vocabulary embedding.',
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding['input_ids']]

            # fmt: off
            expected_encoding = {
'input_ids': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
            # fmt: on

            expected_decoded_sequences = [
                'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
                'ALBERT incorporates two parameter reduction techniques',
                'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
                ' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
                ' vocabulary embedding.',
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
| 108 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):

    model_type = 'upernet'

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
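# Usage sketch:
#   config = UperNetConfig()           # falls back to a default ResNet backbone
#   config.backbone_config.model_type  # -> 'resnet'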
| 108 | 1 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector = vector . vector."""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier with linear and RBF kernels, trained via Wolfe's dual."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma')
            if not isinstance(self.gamma, (float, int)):
                raise ValueError('gamma must be float or int')
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f'Unknown kernel: {kernel}'
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
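# Minimal usage sketch (illustrative, linearly separable data):
#   xs = [np.asarray([0.0, 1.0]), np.asarray([1.0, 0.0])]
#   ys = np.asarray([1, -1])
#   svc = SVC(kernel="linear")
#   svc.fit(xs, ys)
#   svc.predict(np.asarray([0.0, 1.0]))  # expected: 1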
| 224 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]',
            'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing',
            ',', 'low', 'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding='max_length', max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 224 | 1 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Return the measurement histogram for a single qubit left in the |0> state."""
    simulator = qiskit.Aer.get_backend("""aer_simulator""")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
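# With no gates applied the qubit stays in |0>, so the histogram is {'0': 1000}.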
| 703 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """SentencePiece Unigram tokenizer with NMT, NFKC, whitespace and lower-casing normalization."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8_000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8_000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
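# Usage sketch (hypothetical corpus file):
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train("corpus.txt", vocab_size=8_000)
#   tokenizer.encode("Hello world").tokens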
| 458 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mctct'] = [
        """MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MCTCTForCTC""",
        """MCTCTModel""",
        """MCTCTPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 432 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):

    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 1_069, 11]]),
torch.tensor([[0, 0, 0, 1_069, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
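# Note: the tokenizer emits one token sequence per prior level; only the top-level
# prior conditions on the full lyrics, hence the short second and third tensors.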
| 87 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class T5Config(PretrainedConfig):
    model_type = 't5'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=512,
        d_kv=64,
        d_ff=2_048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj='relu',
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'

        if len(act_info) > 1 and act_info[0] != 'gated' or len(act_info) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. '
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == 'gated-gelu':
            self.dense_act_fn = 'gelu_new'

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 712 |
"""simple docstring"""
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    """Convenient initial guess: the first power of two that exceeds a."""
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """
    Square root approximated with Newton's method.
    https://en.wikipedia.org/wiki/Newton%27s_method

    >>> all(abs(square_root_iterative(i) - math.sqrt(i)) <= 1e-14 for i in range(500))
    True
    """
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 386 | 0 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

            generator = generator.manual_seed(0)
            new_image = pipe.dual_guided(
                prompt="first prompt",
                image=init_image,
                text_to_image_strength=0.75,
                generator=generator,
                guidance_scale=7.5,
                num_inference_steps=2,
                output_type="numpy",
            ).images

            assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
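# For illustration: floats_list((2, 3)) returns a 2-element list of 3 floats
# each, uniformly drawn from [0, scale).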
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
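# prepare_inputs_for_common yields a batch of raw waveforms whose lengths
# grow from min_seq_length to max_seq_length unless equal_length is set, so
# both the padding and the truncation paths get exercised by the tests below.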
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_var_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
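# The per-task MAPPING_* dicts below are unions of the partial tables above;
# fairseq parameter names on the left are translated to HF module paths on
# the right, with "*" standing in for a layer index.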
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
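# For example, should_ignore("encoder.proj.bias", IGNORE_KEYS_S2T) is True
# because "encoder.proj" is listed, while an ordinary encoder layer weight
# falls through every pattern and returns False.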
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)

                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
_UpperCAmelCase = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
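# Example invocation (the paths and script name are hypothetical placeholders):
#   python convert_speecht5_checkpoint.py --task s2t \
#       --checkpoint_path ./speecht5_asr.pt --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_asr_hf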
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
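    # e.g. with num_inference_steps=50 and strength=0.8: init_timestep=40 and
    # t_start=10, so the last 40 scheduler timesteps are returned and the
    # denoising loop below runs for 40 steps.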
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 5_12,
"distilbert-base-uncased-distilled-squad": 5_12,
"distilbert-base-cased": 5_12,
"distilbert-base-cased-distilled-squad": 5_12,
"distilbert-base-german-cased": 5_12,
"distilbert-base-multilingual-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
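    # For a pair (A, B) the layout is [CLS] A [SEP] B [SEP]: the first segment
    # (including [CLS] and its [SEP]) gets token type 0, the second segment
    # (including its trailing [SEP]) gets token type 1.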
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()
    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )
    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]

        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff=True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
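# Typical usage (values illustrative): BitsAndBytesConfig(load_in_4bit=True,
# bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype="bfloat16") reports
# quantization_method() == "nf4" and serializes only the non-default fields.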
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
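# By linearity of expectation, E[#distinct colours] = NUM_COLOURS * P(a given
# colour appears), and P(a colour is absent) is
# C(NUM_BALLS - BALLS_PER_COLOUR, 20) / C(NUM_BALLS, 20), which is exactly
# what solution() computes below.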
def solution(num_picked: int = 20) -> str:
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(2_0))
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
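# The bound 7 * 9! works because an 8-digit number is at least 10_000_000
# while the largest possible digit-factorial sum for 8 digits is
# 8 * 9! = 2_903_040, so no number with 8 or more digits can equal its sum.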
if __name__ == "__main__":
print(f'''{solution() = }''')
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num
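# e.g. solution(15) == 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.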
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :Optional[Any] = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
__SCREAMING_SNAKE_CASE :Dict = solution(power)
print('''Sum of the digits is: ''', result)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
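# _LazyModule defers the torch-backed imports declared above until an
# attribute such as SwiftFormerModel is first accessed, keeping the package
# import fast even when the optional backend is installed.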
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place, using binary search to locate each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
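# e.g. binary_insertion_sort([5, 2, 4, 2]) -> [2, 2, 4, 5]. The binary search
# only reduces comparisons; element shifting still makes the sort O(n^2) in
# the worst case.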
if __name__ == "__main__":
__UpperCAmelCase = input("Enter numbers separated by a comma:\n").strip()
__UpperCAmelCase = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted)) | 329 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        """
        Computes log probabilities (or, when `labels` are given, the negative
        log likelihood of the shifted targets) with the adaptive softmax head.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster

                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
def hamming(n_element: int) -> list:
    """Return the first n_element Hamming numbers (numbers of the form 2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
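# e.g. hamming(7) -> [1, 2, 3, 4, 5, 6, 8]; the three pointers i, j and k
# track the smallest multiple of 2, 3 and 5 not yet appended, so each new
# element is produced in amortized O(1) time.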
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
_lowerCamelCase : Dict = hamming(int(n))
print("-----------------------------------------------------")
print(f'''The list with nth numbers is: {hamming_numbers}''')
print("-----------------------------------------------------")
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCamelCase : List[str] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowerCamelCase : Tuple = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowerCamelCase : str = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    """MAUVE metric, wrapping the official `mauve-text` implementation."""

    def _info(self):
        """Return the MetricInfo describing MAUVE's inputs, citation and codebase."""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ):
        """Compute MAUVE between `predictions` and `references` (see module docstring for arguments)."""
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
return out
| 324 | 0 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        '''A tokenizer already in the local cache should load even if the Hub returns HTTP 500.'''
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check we did call the fake head request
            mock_head.assert_called()
@require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        '''Same as above, for a fast tokenizer.'''
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained('gpt2')
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained('gpt2')
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        '''A slow tokenizer can still be loaded from a bare sentencepiece file.'''
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, 'wb') as f:
                http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model', f)
            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile('tokenizer.json'):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open('tokenizer.json', 'wb') as f:
                http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json', f)
            tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove('tokenizer.json')
    def test_legacy_load_from_url(self):
        '''Loading directly from a URL pointing at a sentencepiece model still works.'''
        _ = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model')
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
        '''Remove any repositories created by these tests.'''
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
    def test_push_to_hub(self):
        '''Push a slow tokenizer to the Hub and reload it.'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub('test-tokenizer', use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token, repo_id='test-tokenizer')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id='test-tokenizer', push_to_hub=True, use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        '''Push a slow tokenizer to an organization repo and reload it.'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub('valid_org/test-tokenizer-org', use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-tokenizer-org')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id='valid_org/test-tokenizer-org', push_to_hub=True, use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
@require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        '''Push custom (dynamic) tokenizer classes and reload them with trust_remote_code.'''
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)
        # No fast custom tokenizer
        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer')
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)
        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizerFast')
        tokenizer = AutoTokenizer.from_pretrained(
            F'''{USER}/test-dynamic-tokenizer''', use_fast=False, trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer')
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add('Hello 友達')
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
        trie.add('Hello')
        trie.data
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})
    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS] This is a extra_id_100'])
        trie.add('[CLS]')
        trie.add('extra_id_1')
        trie.add('extra_id_100')
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS]', ' This is a ', 'extra_id_100'])
    def test_trie_single(self):
        trie = Trie()
        trie.add('A')
        self.assertEqual(trie.split('ABC'), ['A', 'BC'])
        self.assertEqual(trie.split('BCA'), ['BC', 'A'])
    def test_trie_final(self):
        trie = Trie()
        trie.add('TOKEN]')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])
    def test_trie_subtokens(self):
        trie = Trie()
        trie.add('A')
        trie.add('P')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])
    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add('AB')
        trie.add('B')
        trie.add('C')
        self.assertEqual(trie.split('ABC'), ['AB', 'C'])
    def test_trie_skip(self):
        trie = Trie()
        trie.add('ABC')
        trie.add('B')
        trie.add('CD')
        self.assertEqual(trie.split('ABCD'), ['ABC', 'D'])
    def test_cut_text_hardening(self):
        '''cut_text should tolerate out-of-order offsets without crashing.'''
        trie = Trie()
        parts = trie.cut_text('ABC', [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ['AB', 'C'])
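# A standalone sketch of the behaviour the Trie tests above rely on: every token
# added to the trie becomes a split point for `Trie.split`.
if __name__ == "__main__":
    demo_trie = Trie()
    demo_trie.add('[CLS]')
    print(demo_trie.split('[CLS] hello world'))  # ['[CLS]', ' hello world']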
| 3 |
"""simple docstring"""
def snake_case ( _a: float , _a: float )-> float:
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.25) = }""")
print(f"""{price_plus_tax(1_25.50, 0.05) = }""")
| 510 | 0 |
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest(SchedulerCommonTest):
    """Tests for the KDPM2 (k-diffusion) discrete scheduler."""
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1100,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693_4286_5017_0972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if str(torch_device).startswith('cpu'):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 147 |
"""simple docstring"""
from math import loga
def UpperCamelCase_ ( lowerCamelCase : int ) -> int:
"""simple docstring"""
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError('''Input value must be a \'int\' type''' )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
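    # Worked examples of the `a & -a` trick: it isolates the lowest set bit in
    # two's complement, so log2 of the result is that bit's index.
    for value, expected in [(1, 0), (2, 1), (12, 2), (40, 3)]:
        assert lowest_set_bit_index(value) == expected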
| 147 | 1 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock_1 = FileLock(str(tmpdir / 'foo.lock'))
    lock_2 = FileLock(str(tmpdir / 'foo.lock'))
    timeout = 0.01
    with lock_1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock_2.acquire(timeout)
            assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = 'a' * 1000 + '.lock'
    lock_1 = FileLock(str(tmpdir / filename))
    assert lock_1._lock_file.endswith('.lock')
    assert not lock_1._lock_file.endswith(filename)
    assert len(os.path.basename(lock_1._lock_file)) <= 255
    lock_2 = FileLock(tmpdir / filename)
    with lock_2.acquire():
        with pytest.raises(Timeout):
            lock_1.acquire(0)
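# A standalone sketch of the timeout behaviour exercised above: a second lock on
# the same file cannot be acquired while the first one is held.
if __name__ == "__main__":
    import tempfile

    demo_path = os.path.join(tempfile.mkdtemp(), 'demo.lock')
    outer = FileLock(demo_path)
    with outer.acquire():
        try:
            FileLock(demo_path).acquire(timeout=0.01)
        except Timeout:
            print('second acquire timed out while the first lock was held')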
| 273 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFDebertaVaModel,
            'fill-mask': TFDebertaVaForMaskedLM,
            'question-answering': TFDebertaVaForQuestionAnswering,
            'text-classification': TFDebertaVaForSequenceClassification,
            'token-classification': TFDebertaVaForTokenClassification,
            'zero-shot': TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason='Model not available yet')
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1E-4)
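# What the slow test above checks, in plain form (a hypothetical run: it needs
# network access and the `kamalkraj/deberta-v2-xlarge` checkpoint):
# model = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
# ids = tf.constant([[0, 31414, 232, 2]])
# hidden = model(ids, attention_mask=tf.ones_like(ids))[0]  # shape (1, 4, hidden_size)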
| 356 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor, as a nested Python list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    '''Holds the feature-extractor configuration and input builders shared by the tests below.'''
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, do_normalize=True, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", fmin=80, fmax=7600, mel_floor=1e-10, return_attention_mask=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1E-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1E-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors='np').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1E-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
def _UpperCamelCase ( self ) -> Any:
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case_ = ['longest', 'max_length', 'do_not_pad']
snake_case_ = [None, 16_00, None]
for max_length, padding in zip(a , a ):
snake_case_ = feat_extract(a , padding=a , max_length=a , return_tensors='np' )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[0][10_00:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def _UpperCamelCase ( self ) -> Any:
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = range(8_00 , 14_00 , 2_00 )
snake_case_ = [floats_list((1, x) )[0] for x in lengths]
snake_case_ = ['longest', 'max_length', 'do_not_pad']
snake_case_ = [None, 16_00, None]
for max_length, padding in zip(a , a ):
snake_case_ = feat_extract(a , max_length=a , padding=a )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def _UpperCamelCase ( self ) -> Any:
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case_ = feat_extract(
a , truncation=a , max_length=10_00 , padding='max_length' , return_tensors='np' )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case_ = feat_extract(
a , truncation=a , max_length=10_00 , padding='longest' , return_tensors='np' )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
snake_case_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case_ = feat_extract(
a , truncation=a , max_length=20_00 , padding='longest' , return_tensors='np' )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
def _UpperCamelCase ( self ) -> Optional[Any]:
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = np.random.rand(1_00 ).astype(np.floataa )
snake_case_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
snake_case_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _UpperCamelCase ( self ) -> Union[str, Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case_ = [np.asarray(a ) for speech_input in speech_inputs]
# Test feature size
snake_case_ = feature_extractor(audio_target=a , padding=a , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
snake_case_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
snake_case_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(a , a , atol=1E-3 ) )
# Test batched
snake_case_ = feature_extractor(a , return_tensors='np' ).input_values
snake_case_ = feature_extractor(a , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a , a ):
self.assertTrue(np.allclose(a , a , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
snake_case_ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
snake_case_ = np.asarray(a )
snake_case_ = feature_extractor(a , return_tensors='np' ).input_values
snake_case_ = feature_extractor(a , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a , a ):
self.assertTrue(np.allclose(a , a , atol=1E-3 ) )
def _UpperCamelCase ( self ) -> Any:
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target()
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(a ) == len(a ) for x, y in zip(a , processed_features[input_name] ) ) )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a )
snake_case_ = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
snake_case_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _UpperCamelCase ( self ) -> Optional[int]:
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a )
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
snake_case_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target()
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = feat_extract.num_mel_bins # hack!
snake_case_ = feat_extract.pad(a , padding='longest' , return_tensors='np' )[input_name]
snake_case_ = feat_extract.pad(a , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = self.feat_extract_dict
snake_case_ = True
snake_case_ = self.feature_extraction_class(**a )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target()
snake_case_ = [len(a ) for x in speech_inputs]
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = feat_extract.num_mel_bins # hack!
snake_case_ = feat_extract.pad(a , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , a )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a )
def _UpperCamelCase ( self ) -> Optional[int]:
snake_case_ = self.feat_extract_dict
snake_case_ = True
snake_case_ = self.feature_extraction_class(**a )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target()
snake_case_ = [len(a ) for x in speech_inputs]
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = min(a )
snake_case_ = feat_extract.num_mel_bins # hack!
snake_case_ = feat_extract.pad(
a , padding='max_length' , max_length=a , truncation=a , return_tensors='np' )
self.assertIn('attention_mask' , a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
             3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
             2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
             4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
             7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
             4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors='pt').input_values
        self.assertEquals(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1E-6))
    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
             -3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
             -3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
             -3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors='pt').input_values
        self.assertEquals(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1E-4))
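# A standalone sketch of the two call modes the tests above exercise, on dummy
# audio (uses the `SpeechTaFeatureExtractor` imported at the top of this module):
if __name__ == "__main__":
    fe = SpeechTaFeatureExtractor()
    wav = [0.1] * 16_000  # one second of fake 16 kHz audio
    wave_feats = fe(wav, sampling_rate=16_000, return_tensors='np')                 # raw waveform features
    target_feats = fe(audio_target=wav, sampling_rate=16_000, return_tensors='np')  # log-mel spectrogram targets
    print(wave_feats.input_values.shape, target_feats.input_values.shape)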
| 607 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert('RGB')) for img in image]
    image = torch.stack(image)
    return image
class ImageToImageDDIMPipeline(DiffusionPipeline):
    '''DDIM image-to-image pipeline: partially noise an input image, then denoise it.'''
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''')
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}''')
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        print('add noise to latents at timestep', timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
@torch.no_grad()
    def __call__(self, image=None, strength=0.8, batch_size=1, generator=None, eta=0.0, num_inference_steps=50, use_clipped_model_output=None, output_type="pil", return_dict=True, ) -> Union[ImagePipelineOutput, Tuple]:
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator, ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
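# Hypothetical usage of the pipeline class defined above (the checkpoint name and
# image paths are illustrative placeholders, not tested values):
# import PIL.Image
# from diffusers import UNet2DModel
# unet = UNet2DModel.from_pretrained('google/ddpm-celebahq-256')
# pipe = ImageToImageDDIMPipeline(unet=unet, scheduler=DDIMScheduler(num_train_timesteps=1000))
# out = pipe(image=PIL.Image.open('face.png'), strength=0.75, num_inference_steps=50)
# out.images[0].save('face_out.png')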
| 607 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 9,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, )
        components = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"""image.shape {image.shape}""")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = 'a hat'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            prompt, image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ", )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ", )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ", )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 343 | 0 |
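A quick sanity check for the converter above; the expression is an arbitrary example, not one from the original file, and the tabular trace printed by infix_2_postfix is ignored here.

# Illustrative only: "a+b^c" reversed is "c^b+a", whose postfix is "cb^a+";
# reversing that string yields the prefix form.
assert infix_2_prefix("a+b^c") == "+a^bc"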
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 715 |
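A minimal, hypothetical sketch of how the deprecation shim above behaves; the instantiation relies only on the image processor's defaults.

import warnings

# Illustrative only: constructing the deprecated alias emits a FutureWarning but
# still yields a fully functional OwlViTImageProcessor subclass.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = OwlViTFeatureExtractor()
assert any(issubclass(w.category, FutureWarning) for w in caught)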
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name} )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
| 281 | 0 |
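A minimal usage sketch for download_prompt (illustrative rather than a test: the first call hits the Hub, and "my-agent" is an arbitrary agent name).

# Illustrative only: a None prompt falls back to the default prompts repo, while
# any string containing whitespace is treated as a literal prompt and returned as-is.
template = download_prompt(None, agent_name="my-agent", mode="run")
passthrough = download_prompt("Answer briefly: <<task>>", agent_name="my-agent")
assert passthrough == "Answer briefly: <<task>>"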
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
_A = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]


class ShapEImg2ImgPipeline(DiffusionPipeline):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str:
super().__init__()
self.register_modules(
prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , )
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[str]:
if latents is None:
lowerCAmelCase_ = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowerCAmelCase_ = latents.to(_UpperCamelCase )
lowerCAmelCase_ = latents * scheduler.init_noise_sigma
return latents
def __a ( self , _UpperCamelCase=0 ) -> Dict:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCAmelCase_ = torch.device(f"""cuda:{gpu_id}""" )
lowerCAmelCase_ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
@property
def __a ( self ) -> Any:
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_UpperCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ):
lowerCAmelCase_ = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_UpperCamelCase , axis=0 )
if not isinstance(_UpperCamelCase , torch.Tensor ):
lowerCAmelCase_ = self.image_processor(_UpperCamelCase , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
lowerCAmelCase_ = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase )
lowerCAmelCase_ = self.image_encoder(_UpperCamelCase )["last_hidden_state"]
lowerCAmelCase_ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCAmelCase_ = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
lowerCAmelCase_ = torch.zeros_like(_UpperCamelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase_ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 25 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 4.0 , _UpperCamelCase = 64 , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Optional[int]:
if isinstance(_UpperCamelCase , PIL.Image.Image ):
lowerCAmelCase_ = 1
elif isinstance(_UpperCamelCase , torch.Tensor ):
lowerCAmelCase_ = image.shape[0]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCAmelCase_ = len(_UpperCamelCase )
else:
raise ValueError(
f"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}""" )
lowerCAmelCase_ = self._execution_device
lowerCAmelCase_ = batch_size * num_images_per_prompt
lowerCAmelCase_ = guidance_scale > 1.0
lowerCAmelCase_ = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# prior
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
lowerCAmelCase_ = self.scheduler.timesteps
lowerCAmelCase_ = self.prior.config.num_embeddings
lowerCAmelCase_ = self.prior.config.embedding_dim
lowerCAmelCase_ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCAmelCase_ = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase_ = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = self.prior(
_UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding
# remove the variance
lowerCAmelCase_ , lowerCAmelCase_ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowerCAmelCase_ , lowerCAmelCase_ = noise_pred.chunk(2 )
lowerCAmelCase_ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCAmelCase_ = self.scheduler.step(
_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_UpperCamelCase )
lowerCAmelCase_ = []
for i, latent in enumerate(_UpperCamelCase ):
lowerCAmelCase_ = self.renderer.decode(
latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_UpperCamelCase )
lowerCAmelCase_ = torch.stack(_UpperCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
lowerCAmelCase_ = images.cpu().numpy()
if output_type == "pil":
lowerCAmelCase_ = [self.numpy_to_pil(_UpperCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_UpperCamelCase )
| 290 |
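The guidance split above is standard classifier-free guidance; a self-contained sketch of just that arithmetic (shapes and scale are arbitrary).

import torch

# Illustrative only: the model runs on a doubled batch (unconditional half first),
# and the two halves are recombined with the guidance scale.
guidance_scale = 4.0
noise_pred = torch.randn(2, 16, 32)  # stand-in for the prior's predicted embedding
noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
assert guided.shape == (1, 16, 32)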
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _lowerCAmelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=32 , _UpperCamelCase=2 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase="None" , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , ) -> Tuple:
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_input_mask
lowerCAmelCase_ = use_token_type_ids
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = num_choices
lowerCAmelCase_ = relative_attention
lowerCAmelCase_ = position_biased_input
lowerCAmelCase_ = pos_att_type
lowerCAmelCase_ = scope
def __a ( self ) -> List[str]:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ = None
if self.use_input_mask:
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ = None
if self.use_token_type_ids:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_UpperCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
lowerCAmelCase_ = TFDebertaVaModel(config=_UpperCamelCase )
lowerCAmelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase_ = [input_ids, input_mask]
lowerCAmelCase_ = model(_UpperCamelCase )
lowerCAmelCase_ = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
lowerCAmelCase_ = TFDebertaVaForMaskedLM(config=_UpperCamelCase )
lowerCAmelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
lowerCAmelCase_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = TFDebertaVaForSequenceClassification(config=_UpperCamelCase )
lowerCAmelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
lowerCAmelCase_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = TFDebertaVaForTokenClassification(config=_UpperCamelCase )
lowerCAmelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
lowerCAmelCase_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
lowerCAmelCase_ = TFDebertaVaForQuestionAnswering(config=_UpperCamelCase )
lowerCAmelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
lowerCAmelCase_ = model(_UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( __a , __a , unittest.TestCase ):
_lowercase =(
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
_lowercase =(
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowercase =False
_lowercase =False
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = TFDebertaVaModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def __a ( self ) -> str:
self.config_tester.run_common_tests()
def __a ( self ) -> int:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase )
def __a ( self ) -> int:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
@slow
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
self.assertIsNotNone(_UpperCamelCase )
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def __a ( self ) -> List[str]:
pass
@slow
def __a ( self ) -> Dict:
lowerCAmelCase_ = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
lowerCAmelCase_ = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
lowerCAmelCase_ = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCAmelCase_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )[0]
lowerCAmelCase_ = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , _UpperCamelCase , atol=1e-4 )
| 290 | 1 |
"""simple docstring"""
def matching_min_vertex_cover(graph: dict) -> set:
    """
    APX algorithm for minimum vertex cover using the matching approach.
    """
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """
    Return a set of couples that represents all of the edges.
    """
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 361 |
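A small, illustrative run on the graph from the commented-out example above; since the exact cover depends on set iteration order, only the covering property itself is asserted.

graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = matching_min_vertex_cover(graph)
# every edge must have at least one endpoint in the cover
assert all(u in cover or v in cover for u, v in get_edges(graph))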
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="gelu", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 361 | 1 |
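A minimal check of the embedding-to-linear conversion above (the sizes are arbitrary).

import torch
from torch import nn

# Illustrative only: the converted linear layer shares the embedding's storage,
# which is exactly the input/output embedding tying the script relies on.
emb = nn.Embedding(10, 4)
lin = make_linear_from_emb(emb)
assert lin.weight.data_ptr() == emb.weight.data_ptr()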
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
UpperCamelCase = """\
"""
UpperCamelCase = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
UpperCamelCase = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True, ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 104 |
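The loop above implements the identity perplexity = exp(mean negative log-likelihood); a tiny self-contained sketch with made-up token probabilities.

import numpy as np

# Illustrative only: exp of the mean NLL equals the inverse geometric mean
# of the per-token probabilities.
token_probs = np.array([0.5, 0.25, 0.125])
ppl = np.exp((-np.log(token_probs)).mean())
assert np.isclose(ppl, (0.5 * 0.25 * 0.125) ** (-1 / 3))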
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
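    """
    Maps any real-valued vector through the logistic function 1 / (1 + e^(-x)).

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """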
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 497 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the loaded dataset bunch into features and target arrays.
    return (data["data"], data["target"])


def xgboost(features, target) -> XGBClassifier:
    # Fit an XGBoost classifier on the training split.
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the iris dataset and split it into train/test sets.
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true", )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 721 |
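For reference, normalize="true" row-normalizes the confusion matrix; a minimal sketch of the same normalization done by hand (the counts are made up).

import numpy as np

# Illustrative only: each row (true class) is scaled to sum to 1, so cell (i, j)
# becomes the fraction of class-i samples predicted as class j.
cm = np.array([[8.0, 2.0], [1.0, 9.0]])  # hypothetical raw counts
cm_normalized = cm / cm.sum(axis=1, keepdims=True)
assert np.allclose(cm_normalized.sum(axis=1), 1.0)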
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy")

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()

        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 597 | 0 |
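The strength argument above controls how much of the schedule is re-run on the init image; a minimal sketch of the usual timestep truncation (the step counts are arbitrary, mirroring the common diffusers get_timesteps logic).

# Illustrative only: with strength=0.2 only the final 20% of the schedule runs,
# so the result stays close to the input image.
num_inference_steps = 100
strength = 0.2
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
assert num_inference_steps - t_start == 20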
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 61 |
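The repeated try/except blocks above gate each backend behind its optional dependency; a generic, self-contained sketch of the same pattern (the function name is hypothetical).

# Illustrative only: expose the real implementation when the optional dependency
# imports cleanly, and a stand-in that fails loudly on use when it does not.
try:
    import torch  # noqa: F401
    _torch_available = True
except ImportError:
    _torch_available = False

if _torch_available:
    def scheduler_step(x):
        return x  # stand-in for the real, torch-backed implementation
else:
    def scheduler_step(x):
        raise ImportError("scheduler_step requires `torch`: pip install torch")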
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=125, additional_special_tokens=None, **kwargs, ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens")

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text):
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        return ()
| 164 | 0 |
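A small sketch of the byte-level scheme implemented above: every UTF-8 byte becomes one token, and IDs are offset past the three special-token slots. The tokenizer is built directly from the class as defined here, so no pretrained files are needed.

# Illustrative only: "é" is two UTF-8 bytes, so "hé" yields three byte tokens,
# each id shifted by the 3 specials (pad=0, eos=1, unk=2).
tok = ByT5Tokenizer()
pieces = tok._tokenize("hé")
ids = [tok._convert_token_to_id(p) for p in pieces]
assert ids == [0x68 + 3, 0xC3 + 3, 0xA9 + 3]
assert tok.convert_tokens_to_string(pieces) == "hé"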
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=None, scaling="mean", num_dynamic_real_features=0, num_static_categorical_features=0, num_static_real_features=0, num_time_features=0, cardinality=None, embedding_dimension=None, d_model=64, encoder_ffn_dim=32, decoder_ffn_dim=32, encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2, is_encoder_decoder=True, activation_function="gelu", dropout=0.05, encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, attention_type="prob", sampling_factor=5, distil=True, **kwargs, ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 629 | 0 |
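A quick check of the feature_size arithmetic above with the defaults (computed by hand: seven default lags times input_size 1, plus the two log-loc/log-scale features counted by _number_of_features).

# Illustrative only: with no static/dynamic/time features, _number_of_features
# reduces to input_size * 2, so feature_size = 1 * 7 + 2 = 9.
config = InformerConfig(prediction_length=24)
assert config.feature_size == 1 * 7 + (sum(config.embedding_dimension) + 2)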
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    # small helper so argparse can parse an explicit boolean flag value
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 506 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : str , lowerCAmelCase : int = 16 , lowerCAmelCase : int = 88 , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : int = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : int = 32 , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : str = "geglu" , lowerCAmelCase : Optional[int] = None , ):
super().__init__()
lowerCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowerCAmelCase , attention_head_dim=lowerCAmelCase , in_channels=lowerCAmelCase , num_layers=lowerCAmelCase , dropout=lowerCAmelCase , norm_num_groups=lowerCAmelCase , cross_attention_dim=lowerCAmelCase , attention_bias=lowerCAmelCase , sample_size=lowerCAmelCase , num_vector_embeds=lowerCAmelCase , activation_fn=lowerCAmelCase , num_embeds_ada_norm=lowerCAmelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowerCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowerCAmelCase = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowerCAmelCase = [1, 0]
def __lowercase ( self : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Any=None , lowerCAmelCase : bool = True , ):
lowerCAmelCase = hidden_states
lowerCAmelCase = []
lowerCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowerCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowerCAmelCase = self.transformer_index_for_condition[i]
lowerCAmelCase = self.transformers[transformer_index](
lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , timestep=lowerCAmelCase , cross_attention_kwargs=lowerCAmelCase , return_dict=lowerCAmelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
lowerCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowerCAmelCase = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowerCAmelCase )
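
# A minimal usage sketch (values are illustrative, not part of the module above):
# a pipeline configures the mixing behaviour on the instance before calling it, e.g.
#
#   model = DualTransformer2DModel(in_channels=320, cross_attention_dim=768)
#   model.mix_ratio = 0.5                          # equal blend of the two transformers
#   model.condition_lengths = [77, 257]            # e.g. text tokens, then image-embedding tokens
#   model.transformer_index_for_condition = [1, 0]
#   # encoder_hidden_states passed to forward() must then carry 77 + 257 = 334 condition tokens.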
| 169 | 0 |
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
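
    # A hypothetical invocation (script name and repo path are placeholders):
    #
    #   python rename_unet_configs_and_weights.py --repo_path ./my-ddpm-repo --dump_path ./my-ddpm-repo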
| 708 |
def solution(n: int = 1000) -> int:
    """
    For each a in [3, n], the maximum remainder of (a - 1)^k + (a + 1)^k divided
    by a^2 over all k is r_max = 2 * a * floor((a - 1) / 2); return the sum of
    r_max over that range (a Project Euler style "square remainders" problem).
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
| 300 | 0 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name: list, arrival_time: list, burst_time: list, no_of_process: int) -> list:
    """Calculate the turnaround times using Highest Response Ratio Next scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process:
    # 0 means the process has not finished yet, 1 means it has.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                # response ratio = (waiting time + burst time) / burst time
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]

            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    """Waiting time = turnaround time - burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )

    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
| 492 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)


def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
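
# A minimal sketch of what the permutation does (shapes chosen for illustration,
# not taken from the original script). For a checkpoint_version >= 2.0 tensor with
# num_heads=2, num_splits=3 (Q, K, V) and hidden_size=3, rows stored head-major as
# [head, split, hidden] are rearranged split-major to [split, head, hidden]:
#
#   param = torch.arange(18 * 4, dtype=torch.float32).view(18, 4)
#   out = fix_query_key_value_ordering(param, 2.0, 3, 2, 3)
#   # out[:6] now holds the query rows of both heads, out[6:12] the keys, out[12:] the values.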
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 492 | 1 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    # In the fairseq feature extractor, type_id 0 addresses a block's conv
    # weight/bias and type_id 2 its norm; with group norm, only layer 0 carries
    # norm parameters, which the elif condition below encodes.
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
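
# A hypothetical invocation (script name and paths are placeholders):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path ./unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-converted \
#       --config_path ./config.json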
| 703 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
__UpperCAmelCase = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
__UpperCAmelCase = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch (NHWC, floats in [0, 1]) to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
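
# A minimal usage sketch (array values are illustrative):
#
#   import numpy as np
#   batch = np.random.rand(2, 64, 64, 3)   # NHWC floats in [0, 1]
#   pil_images = numpy_to_pil(batch)       # -> list of two 64x64 RGB PIL images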
| 220 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
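
# Background note: the original DialoGPT checkpoints store the tied LM head under
# "lm_head.decoder.weight", while transformers' GPT-2 expects "lm_head.weight";
# renaming that single key is all the conversion above does.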
| 126 |

'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''AI-Sweden/gpt-sw3-126m''': 2048,
    '''AI-Sweden/gpt-sw3-350m''': 2048,
    '''AI-Sweden/gpt-sw3-1.6b''': 2048,
    '''AI-Sweden/gpt-sw3-6.7b''': 2048,
    '''AI-Sweden/gpt-sw3-20b''': 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def preprocess_text(self, text: str) -> str:
        """Returns the preprocessed text, matching the preprocessing used when training the tokenizer."""
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to remove the default clean up."""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string; special tokens remain intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """
        Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer. This has reduced
        functionality but is often much faster.
        """
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """
        Decodes token ids to text using the raw SP tokenizer. This has reduced functionality but is often much faster.
        """
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation."""
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )

        return self.encode(text=prompt)
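
# A minimal usage sketch (the model id comes from the maps above; requires the
# sentencepiece vocabulary to be available):
#
#   tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tokenizer.encode_fast("Träd är fina")
#   text = tokenizer.decode_fast(ids)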
| 660 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size)

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ResNet does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 582 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f'loss: {j} \t')  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector
    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
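
# Derivation note: for the cross-entropy cost
#   J(theta) = mean(-y * log(h) - (1 - y) * log(1 - h)),  with h = sigmoid(x @ theta),
# the gradient simplifies to x.T @ (h - y) / m, which is exactly the `gradient`
# computed inside `logistic_reg` above.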
| 582 | 1 |
'''simple docstring'''
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float):
    # normal_gradient = gradient of the normal at the point of reflection
    # outgoing_gradient = gradient of the reflected line
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """
    Count how many times a laser beam reflects inside the mirrored cell bounded
    by the ellipse 4x^2 + y^2 = 100 before escaping through the small gap at the
    top (a Project Euler style laser-beam problem).
    """
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
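
# Geometry note: differentiating the ellipse 4x^2 + y^2 = 100 gives a tangent
# slope of -4x/y, so the normal slope is y / (4x) -- the `normal_gradient` in
# `next_point`. s2 and c2 are sin/cos of twice the normal's angle, used to
# reflect the incoming gradient about that normal.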
| 211 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices)
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
snake_case_ : int = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case_ : Optional[int] = (
{"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case_ : List[Any] = False
snake_case_ : List[str] = False
snake_case_ : Dict = False
snake_case_ : str = False
snake_case_ : Optional[Any] = False
def UpperCamelCase_ ( self : List[str]) -> Any:
"""simple docstring"""
_snake_case : List[str] = FocalNetModelTester(self)
_snake_case : List[str] = ConfigTester(self , config_class=lowerCAmelCase , embed_dim=37 , has_text_modality=lowerCAmelCase)
def UpperCamelCase_ ( self : Optional[Any]) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self : Optional[int]) -> Tuple:
"""simple docstring"""
return
def UpperCamelCase_ ( self : Optional[int]) -> Dict:
"""simple docstring"""
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCamelCase_ ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase)
def UpperCamelCase_ ( self : str) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase)
def UpperCamelCase_ ( self : Tuple) -> int:
"""simple docstring"""
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase)
@unittest.skip(reason="""FocalNet does not use inputs_embeds""")
def UpperCamelCase_ ( self : List[Any]) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""")
def UpperCamelCase_ ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCamelCase_ ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_snake_case : List[str] = model_class(lowerCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_snake_case : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear))
def UpperCamelCase_ ( self : Optional[Any]) -> int:
"""simple docstring"""
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_snake_case : Optional[int] = model_class(lowerCAmelCase)
_snake_case : List[str] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Dict = [*signature.parameters.keys()]
_snake_case : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase)
def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : str) -> List[Any]:
"""simple docstring"""
_snake_case : str = model_class(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
with torch.no_grad():
_snake_case : int = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase))
_snake_case : List[Any] = outputs.hidden_states
_snake_case : Optional[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths) + 1)
self.assertEqual(len(lowerCAmelCase) , lowerCAmelCase)
# FocalNet has a different seq_length
_snake_case : List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_snake_case : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
_snake_case : int = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCAmelCase) , lowerCAmelCase)
_snake_case , _snake_case , _snake_case , _snake_case : str = reshaped_hidden_states[0].shape
_snake_case : Any = (
reshaped_hidden_states[0].view(lowerCAmelCase , lowerCAmelCase , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase_ ( self : Dict) -> List[str]:
"""simple docstring"""
_snake_case , _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_snake_case : int = True
self.check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : Optional[Any] = True
self.check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
def UpperCamelCase_ ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_snake_case , _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = 3
_snake_case : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_snake_case : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_snake_case : List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_snake_case : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_snake_case : Union[str, Any] = True
self.check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : List[Any] = True
self.check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , (padded_height, padded_width))
@slow
def UpperCamelCase_ ( self : Any) -> Tuple:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Union[str, Any] = FocalNetModel.from_pretrained(lowerCAmelCase)
self.assertIsNotNone(lowerCAmelCase)
def UpperCamelCase_ ( self : Dict) -> List[str]:
"""simple docstring"""
_snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Optional[int] = _config_zero_init(lowerCAmelCase)
for model_class in self.all_model_classes:
_snake_case : str = model_class(config=lowerCAmelCase)
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
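        # Rationale (illustrative note): _config_zero_init shrinks the config's
        # initializer ranges to effectively zero, so freshly initialized
        # non-embedding weights should come out as 0.0 (or 1.0 for norm/scale
        # parameters); the (mean * 1e9).round() / 1e9 trick only absorbs
        # floating-point noise before the comparison.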
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : Any) -> Any:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""") if is_vision_available() else None
@slow
def UpperCamelCase_ ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
_snake_case : Any = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""").to(lowerCAmelCase)
_snake_case : Any = self.default_image_processor
_snake_case : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
_snake_case : Optional[int] = image_processor(images=lowerCAmelCase , return_tensors="""pt""").to(lowerCAmelCase)
# forward pass
with torch.no_grad():
_snake_case : List[str] = model(**lowerCAmelCase)
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowerCAmelCase)
_snake_case : str = torch.tensor([0.2_166, -0.4_368, 0.2_191]).to(lowerCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4))
self.assertEqual(outputs.logits.argmax(dim=-1).item() , 281)
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
snake_case_ : List[str] = (FocalNetBackbone,) if is_torch_available() else ()
snake_case_ : Any = FocalNetConfig
snake_case_ : Optional[Any] = False
def UpperCamelCase_ ( self : int) -> Dict:
"""simple docstring"""
_snake_case : Optional[Any] = FocalNetModelTester(self)
| 477 | 0 |
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers( max_number :int ) -> list[int]:
    # Sieve of Eratosthenes: mark every multiple of each prime as composite.
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution( max_number :int = 10**8 ) -> int:
    # Count semiprimes p * q < max_number (with p <= q) using a two-pointer sweep.
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(F"{solution() = }")
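    # Small sanity check (an illustrative addition, not part of the original
    # file; the expected value is easy to verify by hand): the semiprimes
    # below 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26.
    assert solution(30) == 10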
| 574 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation( cells :list[list[int]] ) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (
                not alive and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
def generate_images( cells :list[list[int]] , frames :int ) -> list[Image.Image]:
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new('''RGB''' , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
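    # Illustrative check (an addition, not part of the original file): a
    # blinker oscillates with period 2, so two generations reproduce it.
    assert new_generation(new_generation(BLINKER)) == BLINKER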
| 574 | 1 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__snake_case = logging.get_logger(__name__)
def list_field ( default : Tuple=None , metadata : Dict=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ : Dict =list_field(
default=[], metadata={
'help': (
'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
' of all available models'
)
}, )
UpperCamelCase_ : Any =list_field(
default=[8], metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )
UpperCamelCase_ : Dict =list_field(
default=[8, 32, 128, 512], metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'}, )
UpperCamelCase_ : Dict =field(
default=lowercase, metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'}, )
UpperCamelCase_ : List[Any] =field(
default=lowercase, metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'}, )
UpperCamelCase_ : List[str] =field(
default=lowercase, metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
UpperCamelCase_ : str =field(default=lowercase, metadata={'help': 'Use FP16 to accelerate inference.'} )
UpperCamelCase_ : List[str] =field(default=lowercase, metadata={'help': 'Benchmark training of model'} )
UpperCamelCase_ : Any =field(default=lowercase, metadata={'help': 'Verbose memory tracing'} )
UpperCamelCase_ : Tuple =field(
default=lowercase, metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'}, )
UpperCamelCase_ : Optional[int] =field(
default=lowercase, metadata={
'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
}, )
UpperCamelCase_ : Union[str, Any] =field(default=lowercase, metadata={'help': 'Trace memory line by line'} )
UpperCamelCase_ : str =field(default=lowercase, metadata={'help': 'Save result to a CSV file'} )
UpperCamelCase_ : Union[str, Any] =field(default=lowercase, metadata={'help': 'Save all print statements in a log file'} )
UpperCamelCase_ : int =field(default=lowercase, metadata={'help': 'Whether to print environment information'} )
UpperCamelCase_ : Any =field(
default=lowercase, metadata={
'help': (
'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
' for debugging / testing and on TPU.'
)
}, )
UpperCamelCase_ : List[str] =field(
default=f'''inference_time_{round(time() )}.csv''', metadata={'help': 'CSV filename used if saving time results to csv.'}, )
UpperCamelCase_ : List[str] =field(
default=f'''inference_memory_{round(time() )}.csv''', metadata={'help': 'CSV filename used if saving memory results to csv.'}, )
UpperCamelCase_ : str =field(
default=f'''train_time_{round(time() )}.csv''', metadata={'help': 'CSV filename used if saving time results to csv for training.'}, )
UpperCamelCase_ : Optional[Any] =field(
default=f'''train_memory_{round(time() )}.csv''', metadata={'help': 'CSV filename used if saving memory results to csv for training.'}, )
UpperCamelCase_ : int =field(
default=f'''env_info_{round(time() )}.csv''', metadata={'help': 'CSV filename used if saving environment information.'}, )
UpperCamelCase_ : Any =field(
default=f'''log_{round(time() )}.csv''', metadata={'help': 'Log filename used if print statements are saved in log.'}, )
UpperCamelCase_ : int =field(default=3, metadata={'help': 'Times an experiment will be run.'} )
UpperCamelCase_ : List[str] =field(
default=lowercase, metadata={
'help': (
'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
' model weights.'
)
}, )
def UpperCAmelCase ( self ) -> Optional[Any]:
warnings.warn(
F'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'''
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , _UpperCAmelCase , )
def UpperCAmelCase ( self ) -> Optional[Any]:
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def UpperCAmelCase ( self ) -> List[str]:
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def UpperCAmelCase ( self ) -> List[str]:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
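    # Illustrative CLI sketch (an addition; the script name and exact flag
    # spellings are assumptions -- the help strings above mention --no-speed /
    # --no-memory style negations for the boolean fields):
    #
    #     python run_benchmark.py --models bert-base-cased --batch_sizes 8 \
    #         --sequence_lengths 128 --no-memory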
| 658 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def a__ ( inductance : float , capacitance : float ) -> tuple:
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
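    # Worked example (an illustrative addition): with L = 10 mH and C = 100 nF,
    # 1 / (2 * pi * sqrt(L * C)) is roughly 5033 Hz, so the call below prints
    # ("Resonant frequency", 5032.9...).
    print(a__(10e-3, 100e-9))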
| 82 | 0 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
def __a ( __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None ) -> Any:
'''simple docstring'''
if "." in tensor_name:
lowercase_ = tensor_name.split("." )
for split in splits[:-1]:
lowercase_ = getattr(__lowerCamelCase , __lowerCamelCase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
lowercase_ = new_module
lowercase_ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
lowercase_ = tensor_name in module._buffers
lowercase_ = getattr(__lowerCamelCase , __lowerCamelCase )
if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
lowercase_ = False
lowercase_ = False
if is_buffer or not is_bitsandbytes_available():
lowercase_ = False
lowercase_ = False
else:
lowercase_ = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
lowercase_ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
lowercase_ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
lowercase_ = old_value.to(__lowerCamelCase )
elif isinstance(__lowerCamelCase , torch.Tensor ):
lowercase_ = value.to("cpu" )
if value.dtype == torch.inta:
lowercase_ = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
"0.37.2" )
if not is_abit_serializable:
raise ValueError(
"Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
else:
lowercase_ = torch.tensor(__lowerCamelCase , device="cpu" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , __lowerCamelCase ) and fpaa_statistics is None:
lowercase_ = new_value.T
lowercase_ = old_value.__dict__
if is_abit:
lowercase_ = bnb.nn.IntaParams(__lowerCamelCase , requires_grad=__lowerCamelCase , **__lowerCamelCase ).to(__lowerCamelCase )
elif is_abit:
lowercase_ = bnb.nn.Paramsabit(__lowerCamelCase , requires_grad=__lowerCamelCase , **__lowerCamelCase ).to(__lowerCamelCase )
lowercase_ = new_value
if fpaa_statistics is not None:
setattr(module.weight , "SCB" , fpaa_statistics.to(__lowerCamelCase ) )
else:
if value is None:
lowercase_ = old_value.to(__lowerCamelCase )
elif isinstance(__lowerCamelCase , torch.Tensor ):
lowercase_ = value.to(__lowerCamelCase )
else:
lowercase_ = torch.tensor(__lowerCamelCase , device=__lowerCamelCase )
if is_buffer:
lowercase_ = new_value
else:
lowercase_ = nn.Parameter(__lowerCamelCase , requires_grad=old_value.requires_grad )
lowercase_ = new_value
def __a ( __lowerCamelCase : Tuple , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=False ) -> str:
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
lowercase_ = []
current_key_name.append(__lowerCamelCase )
if (isinstance(__lowerCamelCase , nn.Linear ) or isinstance(__lowerCamelCase , __lowerCamelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in ".".join(__lowerCamelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowercase_ , lowercase_ = module.weight.shape
else:
lowercase_ = module.in_features
lowercase_ = module.out_features
if quantization_config.quantization_method() == "llm_int8":
lowercase_ = bnb.nn.LinearabitLt(
__lowerCamelCase , __lowerCamelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
lowercase_ = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
lowercase_ = bnb.nn.Linearabit(
__lowerCamelCase , __lowerCamelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
lowercase_ = True
# Store the module class in case we need to transpose the weight later
lowercase_ = type(__lowerCamelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__lowerCamelCase )
if len(list(module.children() ) ) > 0:
lowercase_ , lowercase_ = _replace_with_bnb_linear(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , has_been_replaced=__lowerCamelCase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __a ( __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : List[str]=None ) -> Tuple:
'''simple docstring'''
lowercase_ = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
lowercase_ , lowercase_ = _replace_with_bnb_linear(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
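# Illustrative usage sketch (an addition; the model name and config values are
# assumptions, not part of this module). In practice this helper is driven by
# `from_pretrained`, e.g.
#
#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#     bnb_config = BitsAndBytesConfig(load_in_8bit=True)
#     model = AutoModelForCausalLM.from_pretrained("gpt2", quantization_config=bnb_config)
#
# which replaces every linear module except the LM head (the default
# modules_to_not_convert above) with a bitsandbytes 8-bit linear layer.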
def __a ( *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
"`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , __lowerCamelCase , )
return replace_with_bnb_linear(*__lowerCamelCase , **__lowerCamelCase )
def __a ( *__lowerCamelCase : List[Any] , **__lowerCamelCase : Tuple ) -> List[str]:
'''simple docstring'''
warnings.warn(
"`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , __lowerCamelCase , )
return set_module_quantized_tensor_to_device(*__lowerCamelCase , **__lowerCamelCase )
def __a ( __lowerCamelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowercase_ = deepcopy(__lowerCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
lowercase_ = find_tied_parameters(__lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowercase_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowercase_ = sum(__lowerCamelCase , [] )
lowercase_ = len(__lowerCamelCase ) > 0
# Check if it is a base model
lowercase_ = not hasattr(__lowerCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowercase_ = list(model.named_children() )
lowercase_ = [list_modules[-1][0]]
# add last module together with tied weights
lowercase_ = set(__lowerCamelCase ) - set(__lowerCamelCase )
lowercase_ = list(set(__lowerCamelCase ) ) + list(__lowerCamelCase )
# remove ".weight" from the keys
lowercase_ = [".weight", ".bias"]
lowercase_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowercase_ = name.replace(__lowerCamelCase , "" )
filtered_module_names.append(__lowerCamelCase )
return filtered_module_names
| 717 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowercase :
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_token_type_ids=True , use_input_mask=True , use_labels=True , use_mc_token_ids=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
def __UpperCAmelCase ( self : Tuple) -> int:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length])
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase_ = None
if self.use_mc_token_ids:
lowercase_ = ids_tensor([self.batch_size, self.num_choices] , self.seq_length)
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase_ = ids_tensor([self.batch_size] , self.num_choices)
lowercase_ = self.get_config()
lowercase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __UpperCAmelCase ( self : Union[str, Any]) -> int:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , *__lowerCAmelCase : Tuple) -> List[str]:
lowercase_ = CTRLModel(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , head_mask=__lowerCAmelCase)
model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase)
lowercase_ = model(__lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values) , config.n_layer)
def __UpperCAmelCase ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , *__lowerCAmelCase : int) -> int:
lowercase_ = CTRLLMHeadModel(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowercase_ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __UpperCAmelCase ( self : Dict) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict
def __UpperCAmelCase ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , *__lowerCAmelCase : List[Any]) -> int:
lowercase_ = self.num_labels
lowercase_ = CTRLForSequenceClassification(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase_ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
@require_torch
class lowercase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
lowerCamelCase_ =(CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowerCamelCase_ =(CTRLLMHeadModel,) if is_torch_available() else ()
lowerCamelCase_ =(
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase_ =True
lowerCamelCase_ =False
lowerCamelCase_ =False
def __UpperCAmelCase ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int) -> Optional[int]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def __UpperCAmelCase ( self : str) -> Tuple:
lowercase_ = CTRLModelTester(self)
lowercase_ = ConfigTester(self , config_class=__lowerCAmelCase , n_embd=37)
def __UpperCAmelCase ( self : Dict) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : str) -> List[Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Any) -> List[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__lowerCAmelCase)
def __UpperCAmelCase ( self : Tuple) -> Dict:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__lowerCAmelCase)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
pass
@slow
def __UpperCAmelCase ( self : Dict) -> List[str]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = CTRLModel.from_pretrained(__lowerCAmelCase)
self.assertIsNotNone(__lowerCAmelCase)
@unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :)
def __UpperCAmelCase ( self : Optional[Any]) -> Dict:
pass
@require_torch
class lowercase ( unittest.TestCase ):
def __UpperCAmelCase ( self : Optional[int]) -> Any:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def __UpperCAmelCase ( self : int) -> Any:
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[1_1859, 0, 1611, 8]] , dtype=torch.long , device=torch_device)  # Legal the president is
        expected_output_ids = [
            1_1859,
            0,
            1611,
            8,
            5,
            150,
            2_6449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            2_0740,
            24_6533,
            24_6533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False)
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids)
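        # Equivalent standalone sketch of the greedy check above (illustrative;
        # decoding the ids into text needs a tokenizer, which this test skips):
        #
        #     tokenizer = AutoTokenizer.from_pretrained("ctrl")
        #     print(tokenizer.decode(output_ids[0]))
        #     # -> "Legal the president is a good guy and I don't want to lose my job. ..."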
| 461 | 0 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar('T')
class DisjointSetTreeNode(Generic[T] ):
    '''simple docstring'''
    def __init__( self , data : T ) -> None:
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T] ):
    '''simple docstring'''
    def __init__( self ) -> None:
        # map from node name to the node object
        self.map : dict[T, DisjointSetTreeNode[T]] = {}
    def make_set( self , data : T ) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data )
    def find_set( self , data : T ) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent
    def link( self , node1 : DisjointSetTreeNode[T] , node2 : DisjointSetTreeNode[T] ) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1
    def union( self , data1 : T , data2 : T ) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1 ) , self.find_set(data2 ) )
class GraphUndirectedWeighted(Generic[T] ):
    '''simple docstring'''
    def __init__( self ) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections : dict[T, dict[T, int]] = {}
    def add_node( self , node : T ) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}
    def add_edge( self , node1 : T , node2 : T , weight : int ) -> None:
        # add an edge with the given weight
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
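    # Usage sketch (an illustrative addition to the original file):
    #
    #     g = GraphUndirectedWeighted[int]()
    #     g.add_edge(1, 2, 1)
    #     g.add_edge(2, 3, 2)
    #     g.add_edge(1, 3, 10)
    #     mst = g.kruskal()   # kruskal() below keeps edges (1, 2) and (2, 3)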
    def kruskal( self ) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: sort edges by weight, then greedily add edges
        # that connect two different components of the disjoint set.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph
| 86 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
@property
def UpperCamelCase_ ( self ) -> List[str]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase_ ( self ) -> List[Any]:
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: Dict= load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
SCREAMING_SNAKE_CASE__: int= load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
SCREAMING_SNAKE_CASE__: Tuple= load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
# using the PNDM scheduler by default
SCREAMING_SNAKE_CASE__: Tuple= OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= '''A red cat sitting on a park bench'''
SCREAMING_SNAKE_CASE__: Optional[Any]= np.random.RandomState(0 )
SCREAMING_SNAKE_CASE__: Any= pipe(
prompt=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=lowerCAmelCase , output_type='''np''' , )
SCREAMING_SNAKE_CASE__: Any= output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
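        # Note (illustrative): strength=0.75 means the masked region starts
        # from a partially noised version of the original image and only the
        # last portion of the denoising schedule runs, so some original signal
        # survives; the np.abs(...).max() < 1e-2 line then compares the result
        # to the stored reference per channel value in [0, 1].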
| 64 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
__A ={
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
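# Example of how the "*" wildcard above is resolved during weight loading
# (an illustrative note): a fairseq key such as
# "encoder.layers.3.self_attn.k_proj.weight" matches the "self_attn.k_proj"
# entry, the layer index "3" is pulled out of the name, and the target becomes
# "encoder.layers.3.attention.k_proj" (prefixed with "hubert." for fine-tuned
# checkpoints).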
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
for attribute in key.split(""".""" ):
UpperCAmelCase__ : List[Any] = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
UpperCAmelCase__ : str = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
UpperCAmelCase__ : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
UpperCAmelCase__ : str = value
elif weight_type == "weight_g":
UpperCAmelCase__ : Dict = value
elif weight_type == "weight_v":
UpperCAmelCase__ : List[str] = value
elif weight_type == "bias":
UpperCAmelCase__ : str = value
else:
UpperCAmelCase__ : Union[str, Any] = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : Optional[int] = fairseq_model.state_dict()
UpperCAmelCase__ : Any = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase__ : int = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
UpperCAmelCase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase__ : Dict = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
UpperCAmelCase__ : str = True
if "*" in mapped_key:
UpperCAmelCase__ : str = name.split(_lowerCamelCase )[0].split(""".""" )[-2]
UpperCAmelCase__ : Tuple = mapped_key.replace("""*""" , _lowerCamelCase )
if "weight_g" in name:
UpperCAmelCase__ : List[str] = "weight_g"
elif "weight_v" in name:
UpperCAmelCase__ : int = "weight_v"
elif "weight" in name:
UpperCAmelCase__ : Tuple = "weight"
elif "bias" in name:
UpperCAmelCase__ : int = "bias"
else:
UpperCAmelCase__ : Dict = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : str = full_name.split("""conv_layers.""" )[-1]
UpperCAmelCase__ : Union[str, Any] = name.split(""".""" )
UpperCAmelCase__ : List[Any] = int(items[0] )
UpperCAmelCase__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
UpperCAmelCase__ : Any = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
UpperCAmelCase__ : Optional[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
UpperCAmelCase__ : Optional[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
UpperCAmelCase__ : List[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True ):
if config_path is not None:
UpperCAmelCase__ : List[Any] = HubertConfig.from_pretrained(_lowerCamelCase )
else:
UpperCAmelCase__ : List[Any] = HubertConfig()
if is_finetuned:
if dict_path:
UpperCAmelCase__ : Optional[int] = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase__ : Dict = target_dict.pad_index
UpperCAmelCase__ : Any = target_dict.bos_index
UpperCAmelCase__ : Tuple = target_dict.eos_index
UpperCAmelCase__ : int = len(target_dict.symbols )
UpperCAmelCase__ : Tuple = os.path.join(_lowerCamelCase , """vocab.json""" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , _lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_lowerCamelCase , )
UpperCAmelCase__ : Dict = True if config.feat_extract_norm == "layer" else False
UpperCAmelCase__ : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
UpperCAmelCase__ : int = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
UpperCAmelCase__ : str = HubertForCTC(_lowerCamelCase )
else:
UpperCAmelCase__ : int = HubertModel(_lowerCamelCase )
if is_finetuned:
UpperCAmelCase__ : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
UpperCAmelCase__ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
UpperCAmelCase__ : Optional[Any] = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__A =parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 717 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible( num : tuple ) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 1_1, 1_3, 1_7]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 1_0_0 + num[i + 5] * 1_0 + num[i + 6]) % test != 0:
            return False
    return True
def solution( n : int = 1_0 ) -> int:
    return sum(
        int("""""".join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
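# Worked example from the problem statement (included for clarity):
# 1406357289 is 0-to-9 pandigital and has the required property, e.g.
# d4d5d6 = 635 is divisible by 5, d6d7d8 = 572 by 11 and d8d9d10 = 289 by 17.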
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 113 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE_ = {'facebook/bart-base': BartForConditionalGeneration}
SCREAMING_SNAKE_CASE_ = {'facebook/bart-base': BartTokenizer}
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
    parser.add_argument(
        '''--validation_file''' ,type=str ,default=None ,help='''A csv or a json file containing the validation data.''' )
    parser.add_argument(
        '''--max_length''' ,type=int ,default=5 ,help='''The maximum total input sequence length after tokenization.''' ,)
    parser.add_argument(
        '''--num_beams''' ,type=int ,default=None ,help=(
            '''Number of beams to use for evaluation. This argument will be '''
            '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
        ) ,)
    parser.add_argument(
        '''--model_name_or_path''' ,type=str ,help='''Path to pretrained model or model identifier from huggingface.co/models.''' ,required=True ,)
    parser.add_argument(
        '''--config_name''' ,type=str ,default=None ,help='''Pretrained config name or path if not the same as model_name''' ,)
    parser.add_argument(
        '''--device''' ,type=str ,default='''cpu''' ,help='''Device where the model will be run''' ,)
    parser.add_argument('''--output_file_path''' ,type=str ,default=None ,help='''Where to store the final ONNX file.''' )
    args = parser.parse_args()
    return args
def load_model_tokenizer( model_name ,device="cpu" ):
    """simple docstring"""
    huggingface_model = model_dict[model_name].from_pretrained(model_name ).to(device )
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name )
    if model_name in ["facebook/bart-base"]:
        # NOTE: the three config overrides below are reconstructed from the
        # 0 / None / 0 pattern in the original dump; treat the exact attribute
        # names as an assumption.
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model( model ,tokenizer ,onnx_file_path ,num_beams ,max_length ):
    """simple docstring"""
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = '''My friends are cool but they eat too many carbs.'''
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1024 ,return_tensors='''pt''' ).to(model.device )
        summary_ids = model.generate(
            inputs['''input_ids'''] ,attention_mask=inputs['''attention_mask'''] ,num_beams=num_beams ,max_length=max_length ,early_stopping=True ,decoder_start_token_id=model.config.decoder_start_token_id ,)
        torch.onnx.export(
            bart_script_model ,(
                inputs['''input_ids'''],
                inputs['''attention_mask'''],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) ,onnx_file_path ,opset_version=14 ,input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] ,output_names=['''output_ids'''] ,dynamic_axes={
                '''input_ids''': {0: '''batch''', 1: '''seq'''},
                '''output_ids''': {0: '''batch''', 1: '''seq_out'''},
            } ,example_outputs=summary_ids ,)
        logger.info('''Model exported to {}'''.format(onnx_file_path ) )
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info('''Deduplicated and optimized model written to {}'''.format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None ,{
                '''input_ids''': inputs['''input_ids'''].cpu().numpy(),
                '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
                '''num_beams''': np.array(num_beams ),
                '''max_length''': np.array(max_length ),
                '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
            } ,)
        np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1e-3 ,atol=1e-3 )
        logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
        logger.info('''Success.''' )
def main():
    """Entry point: parse arguments, load the model and run the export."""
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO,)
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = 'BART.onnx'
    logger.info('Exporting model to ONNX')
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main() | 34 |
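# Usage sketch for the exporter above: loading the exported graph with ONNX Runtime and
# decoding its beam-search output. "BART.onnx" matches the script's default output name,
# the prompt is illustrative, and decoder_start_token_id=2 is an assumption about
# facebook/bart-base rather than something stated in the script; run the exporter first.
import numpy as np
import onnxruntime
from transformers import BartTokenizer

sess = onnxruntime.InferenceSession("BART.onnx")
tok = BartTokenizer.from_pretrained("facebook/bart-base")
enc = tok(["My friends are cool but they eat too many carbs."], return_tensors="np")
output_ids = sess.run(
    None,
    {
        "input_ids": enc["input_ids"],
        "attention_mask": enc["attention_mask"],
        "num_beams": np.array(4),
        "max_length": np.array(5),
        "decoder_start_token_id": np.array(2),  # assumption: BART's decoder start id
    },
)[0]
print(tok.batch_decode(output_ids, skip_special_tokens=True))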
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self : Tuple ):
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
__lowercase : Optional[int] = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        # This check ensures we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def snake_case_ ( self : Any ):
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained('''gpt2''' )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
__lowercase : Union[str, Any] = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case_ ( self : int ):
# This test is for deprecated behavior and can be removed in v5
try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file , '''wb''' ) as f:
                http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , f )
            _ = AlbertTokenizer.from_pretrained(tmp_file )
        finally:
            os.remove(tmp_file )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json''' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
            with open('''tokenizer.json''' , '''wb''' ) as f:
                http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , f )
            tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''' )
def snake_case_ ( self : Union[str, Any] ):
# This test is for deprecated behavior and can be removed in v5
__lowercase : List[str] = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' )
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def snake_case_ ( cls : Optional[Any] ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def snake_case_ ( cls : Optional[int] ):
try:
delete_repo(token=cls._token , repo_id='''test-tokenizer''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''' )
except HTTPError:
pass
def snake_case_ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , '''vocab.txt''' )
            with open(vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
                vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''test-tokenizer''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir , repo_id='''test-tokenizer''' , push_to_hub=True , use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def snake_case_ ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase : List[str] = os.path.join(_snake_case , '''vocab.txt''' )
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__lowercase : Tuple = BertTokenizer(_snake_case )
tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token )
__lowercase : Any = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_snake_case , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=_snake_case , use_auth_token=self._token )
__lowercase : int = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def snake_case_ ( self : Dict ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase : Any = os.path.join(_snake_case , '''vocab.txt''' )
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__lowercase : Tuple = CustomTokenizer(_snake_case )
# No fast custom tokenizer
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
__lowercase : Optional[int] = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=_snake_case )
        # Can't make an isinstance check because the new tokenizer's class comes from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase : List[str] = os.path.join(_snake_case , '''vocab.txt''' )
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__lowercase : Union[str, Any] = BertTokenizerFast.from_pretrained(_snake_case )
bert_tokenizer.save_pretrained(_snake_case )
__lowercase : List[Any] = CustomTokenizerFast.from_pretrained(_snake_case )
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
__lowercase : Tuple = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=_snake_case )
        # Can't make an isinstance check because the new tokenizer's class comes from the CustomTokenizerFast class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''' )
__lowercase : Optional[Any] = AutoTokenizer.from_pretrained(
F'{USER}/test-dynamic-tokenizer' , use_fast=_snake_case , trust_remote_code=_snake_case )
        # Can't make an isinstance check because the new tokenizer's class comes from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self : Optional[Any] ):
__lowercase : List[Any] = Trie()
trie.add('''Hello 友達''' )
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
trie.add('''Hello''' )
trie.data
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
def snake_case_ ( self : int ):
__lowercase : int = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This is a extra_id_100'''] )
trie.add('''[CLS]''' )
trie.add('''extra_id_1''' )
trie.add('''extra_id_100''' )
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] )
def snake_case_ ( self : Dict ):
__lowercase : List[str] = Trie()
trie.add('''A''' )
self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] )
self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] )
def snake_case_ ( self : int ):
__lowercase : Optional[int] = Trie()
trie.add('''TOKEN]''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def snake_case_ ( self : List[Any] ):
__lowercase : List[Any] = Trie()
trie.add('''A''' )
trie.add('''P''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def snake_case_ ( self : Optional[Any] ):
__lowercase : List[str] = Trie()
trie.add('''AB''' )
trie.add('''B''' )
trie.add('''C''' )
self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] )
def snake_case_ ( self : Any ):
__lowercase : str = Trie()
trie.add('''ABC''' )
trie.add('''B''' )
trie.add('''CD''' )
self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] )
def snake_case_ ( self : Tuple ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
__lowercase : List[str] = Trie()
__lowercase : Optional[Any] = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(_snake_case , ['''AB''', '''C'''] )
| 509 | 0 |
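# A compact sketch of the Trie behaviour exercised by the tests above: `add` builds a
# nested-dict trie terminated by an empty-string key, and `split` cuts the input on the
# longest added tokens. It only loosely mirrors transformers.tokenization_utils.Trie;
# this is an illustration, not the library implementation.
def trie_add(data: dict, word: str) -> None:
    node = data
    for ch in word:
        node = node.setdefault(ch, {})
    node[""] = 1  # terminator, matching the structure asserted in the tests

def trie_split(data: dict, text: str) -> list:
    parts, start, i = [], 0, 0
    while i < len(text):
        node, j, end = data, i, None
        while j < len(text) and text[j] in node:  # walk as far as the trie matches
            node = node[text[j]]
            j += 1
            if "" in node:
                end = j  # remember the longest complete match so far
        if end is None:
            i += 1
        else:
            if i > start:
                parts.append(text[start:i])
            parts.append(text[i:end])
            start = i = end
    if start < len(text):
        parts.append(text[start:])
    return parts

trie_data: dict = {}
for token in ("[CLS]", "extra_id_1", "extra_id_100"):
    trie_add(trie_data, token)
assert trie_split(trie_data, "[CLS] This is a extra_id_100") == ["[CLS]", " This is a ", "extra_id_100"]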
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt2': 1_0_2_4,
'gpt2-medium': 1_0_2_4,
'gpt2-large': 1_0_2_4,
'gpt2-xl': 1_0_2_4,
'distilgpt2': 1_0_2_4,
}
class GPTaTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = GPTaTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<|endoftext|>" , _SCREAMING_SNAKE_CASE="<|endoftext|>" , _SCREAMING_SNAKE_CASE="<|endoftext|>" , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
__A , __A , tokenizer_file=__A , unk_token=__A , bos_token=__A , eos_token=__A , add_prefix_space=__A , **__A , )
UpperCAmelCase : Any = kwargs.pop("""add_bos_token""" , __A )
UpperCAmelCase : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __A ) != add_prefix_space:
UpperCAmelCase : Dict = getattr(__A , pre_tok_state.pop("""type""" ) )
UpperCAmelCase : int = add_prefix_space
UpperCAmelCase : Union[str, Any] = pre_tok_class(**__A )
UpperCAmelCase : Tuple = add_prefix_space
def SCREAMING_SNAKE_CASE ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : int = kwargs.get("""is_split_into_words""" , __A )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A , **__A )
def SCREAMING_SNAKE_CASE ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = kwargs.get("""is_split_into_words""" , __A )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A , **__A )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : int = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__A , add_special_tokens=__A ) + [self.eos_token_id] )
if len(__A ) > self.model_max_length:
UpperCAmelCase : Tuple = input_ids[-self.model_max_length :]
return input_ids
| 711 |
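# Usage sketch for the add_prefix_space / is_split_into_words interplay implemented in
# the fast tokenizer above: pretokenized input is only accepted when the tokenizer was
# built with add_prefix_space=True. Downloading "gpt2" at runtime is assumed possible.
from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
enc = tok(["Hello", "world"], is_split_into_words=True)
print(enc.input_ids)  # each word is encoded with a leading space, as GPT-2 expects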
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """Return the multiplicative inverse of a 2x2 or 3x3 matrix."""
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns,
    # since this branch of the implementation only handles 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError("""This matrix has no inverse.""")
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using the Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError("""This matrix has no inverse.""")
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""")
| 359 | 0 |
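# A quick numerical check of the routine above: the product of a matrix with its
# inverse should be the identity, up to floating-point error. This assumes the
# function keeps the name inverse_of_matrix used here.
import numpy as np

m = [[2.0, 5.0], [1.0, 3.0]]  # determinant 1, so the inverse has integer entries
inv = inverse_of_matrix(m)
assert np.allclose(np.array(m) @ np.array(inv), np.eye(2))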
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _lowerCamelCase( _a ):
    def __init__( self, *args, eval_examples=None, post_process_function=None, **kwargs) -> None:
        """Seq2SeqTrainer variant that post-processes generated predictions before computing metrics."""
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self, eval_dataset = None, eval_examples=None, ignore_keys = None, metric_key_prefix = "eval", **gen_kwargs, ) -> Dict[str, float]:
"""simple docstring"""
_lowercase : Optional[Any] = gen_kwargs.copy()
_lowercase : List[Any] = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
)
_lowercase : Any = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
)
_lowercase : Optional[Any] = gen_kwargs
_lowercase : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset
_lowercase : Optional[int] = self.get_eval_dataloader(lowerCamelCase)
_lowercase : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_lowercase : List[Any] = self.compute_metrics
_lowercase : int = None
_lowercase : Optional[Any] = time.time()
_lowercase : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_lowercase : int = eval_loop(
lowerCamelCase, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=lowerCamelCase, metric_key_prefix=lowerCamelCase, )
finally:
_lowercase : Union[str, Any] = compute_metrics
_lowercase : Optional[int] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCamelCase, lowerCamelCase, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
_lowercase : Dict = self.post_process_function(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : List[Any] = self.compute_metrics(lowerCamelCase)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F'''{metric_key_prefix}_'''):
_lowercase : Optional[int] = metrics.pop(lowerCamelCase)
metrics.update(output.metrics)
else:
_lowercase : Dict = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowerCamelCase)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
_lowercase : List[str] = self.callback_handler.on_evaluate(self.args, self.state, self.control, lowerCamelCase)
return metrics
    def predict( self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix = "test", **gen_kwargs) -> PredictionOutput:
"""simple docstring"""
_lowercase : str = gen_kwargs.copy()
_lowercase : str = self.get_test_dataloader(lowerCamelCase)
# Temporarily disable metric computation, we will do it in the loop here.
_lowercase : List[str] = self.compute_metrics
_lowercase : Any = None
_lowercase : str = time.time()
_lowercase : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_lowercase : str = eval_loop(
lowerCamelCase, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=lowerCamelCase, metric_key_prefix=lowerCamelCase, )
finally:
_lowercase : int = compute_metrics
_lowercase : int = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCamelCase, lowerCamelCase, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ))
if self.post_process_function is None or self.compute_metrics is None:
return output
_lowercase : Dict = self.post_process_function(lowerCamelCase, lowerCamelCase, lowerCamelCase, 'predict')
_lowercase : Optional[Any] = self.compute_metrics(lowerCamelCase)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F'''{metric_key_prefix}_'''):
_lowercase : Optional[Any] = metrics.pop(lowerCamelCase)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=lowerCamelCase)
| 89 |
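# The evaluate/predict overrides above share one small pattern worth isolating: every
# computed metric is renamed to carry the metric_key_prefix. A standalone sketch of
# that renaming loop, with illustrative metric names:
def prefix_metrics(metrics: dict, prefix: str) -> dict:
    for key in list(metrics.keys()):
        if not key.startswith(f"{prefix}_"):
            metrics[f"{prefix}_{key}"] = metrics.pop(key)  # e.g. "rouge1" -> "eval_rouge1"
    return metrics

assert prefix_metrics({"rouge1": 0.4, "eval_loss": 0.1}, "eval") == {
    "eval_rouge1": 0.4,
    "eval_loss": 0.1,
}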
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def parse_args():
    '''Parse the command-line arguments for the generation script.'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """-m""" , """--pretrained_model_name_or_path""" , type=str , default=None , required=True , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , )
    parser.add_argument(
        """-c""" , """--caption""" , type=str , default="""robotic cat with wings""" , help="""Text used to generate images.""" , )
    parser.add_argument(
        """-n""" , """--images_num""" , type=int , default=4 , help="""How many images to generate.""" , )
    parser.add_argument(
        """-s""" , """--seed""" , type=int , default=42 , help="""Seed for random process.""" , )
    parser.add_argument(
        """-ci""" , """--cuda_id""" , type=int , default=0 , help="""cuda_id.""" , )
    args = parser.parse_args()
    return args
def image_grid(imgs , rows , cols ):
    '''Tile a list of equally sized PIL images into a rows x cols grid.'''
    if not len(imgs ) == rows * cols:
        raise ValueError("""The specified number of rows and columns are not correct.""" )
    w , h = imgs[0].size
    grid = Image.new("""RGB""" , size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
def generate_images(pipeline , prompt="""robotic cat with wings""" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    '''Run the pipeline with a fixed seed and arrange the outputs in a grid.'''
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 567 | 0 |
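# A self-contained check of the image_grid tiling used above, with synthetic PIL images
# standing in for diffusion outputs (the solid colours are arbitrary). It assumes the
# helper keeps the name image_grid used at its call site.
from PIL import Image

demo_imgs = [Image.new("RGB", (64, 64), color) for color in ("red", "green", "blue", "white")]
demo_grid = image_grid(demo_imgs, rows=2, cols=2)
assert demo_grid.size == (128, 128)  # 2 x 2 tiles of 64 x 64 pixels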
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self ):
        self.data = []
        self.head = 0
        self.tail = 0

    def is_empty(self ) -> bool:
        return self.head == self.tail

    def push(self , data: Any ) -> None:
        self.data.append(data )
        self.tail = self.tail + 1

    def pop(self ) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self ) -> int:
        return self.tail - self.head

    def print_queue(self ) -> None:
        print(self.data )
        print('''**************''' )
        print(self.data[self.head : self.tail] )
class MyNode:
    def __init__(self , data: Any ):
        self.data = data
        self.left = None
        self.right = None
        self.height = 1

    def get_data(self ) -> Any:
        return self.data

    def get_left(self ):
        return self.left

    def get_right(self ):
        return self.right

    def get_height(self ) -> int:
        return self.height

    def set_data(self , data: Any ) -> None:
        self.data = data

    def set_left(self , node ) -> None:
        self.left = node

    def set_right(self , node ) -> None:
        self.right = node

    def set_height(self , height: int ) -> None:
        self.height = height
def get_height(node: MyNode | None )->int:
    if node is None:
        return 0
    return node.get_height()
def my_max(a: int , b: int )->int:
    if a > b:
        return a
    return b
def right_rotation(node: MyNode )->MyNode:
    print('''right rotation node:''' , node.get_data() )
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right() )
    ret.set_right(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret
def left_rotation(node: MyNode )->MyNode:
    print('''left rotation node:''' , node.get_data() )
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left() )
    ret.set_left(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret
def lr_rotation(node: MyNode )->MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child ) )
    return right_rotation(node )
def rl_rotation(node: MyNode )->MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child ) )
    return left_rotation(node )
def insert_node(node: MyNode | None , data: Any )->MyNode | None:
    if node is None:
        return MyNode(data )
    if data < node.get_data():
        node.set_left(insert_node(node.get_left() , data ) )
        if (
            get_height(node.get_left() ) - get_height(node.get_right() ) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node )
            else:
                node = lr_rotation(node )
    else:
        node.set_right(insert_node(node.get_right() , data ) )
        if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node )
            else:
                node = left_rotation(node )
    height = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(height )
    return node
def get_right_most(root: MyNode )->Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most(root: MyNode )->Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode , data: Any )->MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child )
            root.set_data(temp_data )
            root.set_right(del_node(right_child , temp_data ) )
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print('''No such data''' )
            return root
        else:
            root.set_left(del_node(left_child , data ) )
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child , data ) )
    if get_height(right_child ) - get_height(left_child ) == 2:
        assert right_child is not None
        if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
            root = left_rotation(root )
        else:
            root = rl_rotation(root )
    elif get_height(right_child ) - get_height(left_child ) == -2:
        assert left_child is not None
        if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
            root = right_rotation(root )
        else:
            root = lr_rotation(root )
    height = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
    root.set_height(height )
return root
class AVLtree:
    def __init__(self ):
        self.root = None

    def get_height(self ) -> int:
        return get_height(self.root )

    def insert(self , data: Any ) -> None:
        print('''insert:''' + str(data ) )
        self.root = insert_node(self.root , data )

    def del_node(self , data: Any ) -> None:
        print('''delete:''' + str(data ) )
        if self.root is None:
            print('''Tree is empty!''' )
            return
        self.root = del_node(self.root , data )

    def __str__(self , ):  # a level-order traversal gives a more intuitive look at the tree
        output = ''''''
        q = MyQueue()
        q.push(self.root )
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = ''' ''' * int(math.pow(2 , layer - 1 ) )
            output += space
            if node is None:
                output += "*"
                q.push(None )
                q.push(None )
            else:
                output += str(node.get_data() )
                q.push(node.get_left() )
                q.push(node.get_right() )
            output += space
            cnt = cnt + 1
            for i in range(100 ):
                if cnt == math.pow(2 , i ) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest
    doctest.testmod()

if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
| 720 |
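# A standalone sanity check of the left-rotation shape used above, on a minimal
# three-node right-leaning chain; plain tuples stand in for MyNode objects, so this
# is an illustration of the invariant rather than a reuse of the classes above.
def rotate_left(node):
    # node = (data, left, right); the right child becomes the new subtree root
    data, left, (r_data, r_left, r_right) = node
    return (r_data, (data, left, r_left), r_right)

chain = (1, None, (2, None, (3, None, None)))  # 1 -> 2 -> 3, leaning right
assert rotate_left(chain) == (2, (1, None, None), (3, None, None))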
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = DiTPipeline
SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ = False
def __lowerCAmelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCAmelCase , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=_lowerCAmelCase , )
_lowerCAmelCase = AutoencoderKL()
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
if str(_lowerCAmelCase ).startswith('''mps''' ):
_lowerCAmelCase = torch.manual_seed(_lowerCAmelCase )
else:
_lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowerCAmelCase = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''cpu'''
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase = self.get_dummy_inputs(_lowerCAmelCase )
_lowerCAmelCase = pipe(**_lowerCAmelCase ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCAmelCase , 1E-3 )
def __lowerCAmelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCAmelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
_lowerCAmelCase = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
_lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase )
_lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=40 , output_type='''np''' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCAmelCase ( self ):
_lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
_lowerCAmelCase = ['''vase''', '''umbrella''']
_lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=25 , output_type='''np''' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-1 | 664 | 0 |
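# The tests above pin pipeline outputs with a seeded generator. The core pattern,
# shown in isolation: two generators seeded identically yield identical samples, which
# is what makes the expected-slice assertions reproducible.
import torch

g1 = torch.Generator(device="cpu").manual_seed(0)
g2 = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))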
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=24 , a__=2 , a__=6 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=3 , a__=None , a__=1000 , ):
_lowerCAmelCase : str = parent
_lowerCAmelCase : Union[str, Any] = batch_size
_lowerCAmelCase : Any = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : List[Any] = use_input_mask
_lowerCAmelCase : int = use_token_type_ids
_lowerCAmelCase : Union[str, Any] = use_labels
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : List[str] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : int = num_attention_heads
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : Dict = hidden_dropout_prob
_lowerCAmelCase : List[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : str = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : List[str] = num_labels
_lowerCAmelCase : List[str] = scope
_lowerCAmelCase : str = range_bbox
def __A ( self ):
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCAmelCase : Optional[Any] = bbox[i, j, 3]
_lowerCAmelCase : Tuple = bbox[i, j, 1]
_lowerCAmelCase : List[str] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCAmelCase : Tuple = bbox[i, j, 2]
_lowerCAmelCase : Union[str, Any] = bbox[i, j, 0]
_lowerCAmelCase : Optional[int] = t
_lowerCAmelCase : str = None
if self.use_input_mask:
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowerCAmelCase : Dict = None
if self.use_token_type_ids:
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : Tuple = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def __A ( self ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : str = LiltModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ )
_lowerCAmelCase : Union[str, Any] = model(a__ , bbox=a__ , token_type_ids=a__ )
_lowerCAmelCase : Dict = model(a__ , bbox=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : List[str] = LiltForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Any = model(
a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : Tuple = LiltForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Dict = model(
a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCamelCase : List[Any] = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : List[str] = False
_UpperCamelCase : List[Any] = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
return True
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = LiltModelTester(self )
_lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : List[Any] = type
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@slow
def __A ( self ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : List[Any] = LiltModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : Optional[int] = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(a__ )
_lowerCAmelCase : Dict = torch.tensor([[1, 2]] , device=a__ )
_lowerCAmelCase : List[str] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a__ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Any = model(input_ids=a__ , bbox=a__ )
_lowerCAmelCase : Any = torch.Size([1, 2, 768] )
_lowerCAmelCase : Any = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=a__ , )
self.assertTrue(outputs.last_hidden_state.shape , a__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a__ , atol=1e-3 ) )
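# The per-element loop in prepare_config_and_inputs that makes each bbox legal
# (x0 <= x1 and y0 <= y1) can also be written without Python loops; a vectorized
# sketch of the same normalisation, with illustrative shapes:
import torch

bbox = torch.randint(0, 1000, (2, 7, 4))
x0, y0 = torch.minimum(bbox[..., 0], bbox[..., 2]), torch.minimum(bbox[..., 1], bbox[..., 3])
x1, y1 = torch.maximum(bbox[..., 0], bbox[..., 2]), torch.maximum(bbox[..., 1], bbox[..., 3])
bbox = torch.stack([x0, y0, x1, y1], dim=-1)
assert bool((bbox[..., 2] >= bbox[..., 0]).all() and (bbox[..., 3] >= bbox[..., 1]).all())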
| 213 | """simple docstring"""
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self , img , dst_width: int , dst_height: int ):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("""Destination width/height should be > 0""" )
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
        )

    def process(self ):
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]

    def get_x(self , x: int ) -> int:
        return int(self.ratio_x * x )

    def get_y(self , y: int ) -> int:
        return int(self.ratio_y * y )
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread('image_data/lena.jpg', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
| 213 | 1 |
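# For contrast with the pixel-by-pixel loop above, nearest-neighbour resizing can be
# expressed as a single fancy-indexing step in NumPy; this sketch assumes an H x W x 3
# uint8 image like the one produced by imread.
import numpy as np

def resize_nearest(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)  # source row per output row
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)  # source column per output column
    return img[ys[:, None], xs[None, :]]

demo = (np.arange(4 * 4 * 3) % 256).astype(np.uint8).reshape(4, 4, 3)
assert resize_nearest(demo, 8, 8).shape == (8, 8, 3)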
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase_ : Optional[Any] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure( config ) -> None:
config.addinivalue_line(
'markers' ,'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' ,'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' ,'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' ,'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' ,'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' ,'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption( parser ) -> None:
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ) -> None:
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter ,id=make_reports )
def pytest_sessionfinish( session ,exitstatus ) -> None:
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
lowerCamelCase_ : List[str] = doctest.register_optionflag("IGNORE_RESULT")
lowerCamelCase_ : Tuple = doctest.OutputChecker
class _lowerCamelCase (lowerCamelCase ):
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ : int = CustomOutputChecker
lowerCamelCase_ : List[Any] = HfDoctestModule
lowerCamelCase_ : List[Any] = HfDocTestParser
| 345 |
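# Usage sketch for the markers registered above: once the conftest declares a marker,
# individual tests can opt in and runs can filter on it. The test body here is a
# placeholder, not part of the original suite.
import pytest

@pytest.mark.accelerate_tests  # registered by pytest_configure above
def test_requires_accelerate():
    assert True

# such tests can then be selected with: pytest -m accelerate_tests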
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name: str ) -> SwinvaConfig:
    config = SwinvaConfig()
    name_split = swinva_name.split('_' )
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:] )
    else:
        img_size = int(name_split[3] )
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:] )
    else:
        window_size = int(name_split[2][6:] )
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21841
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-22k-id2label.json'
        idalabel = json.load(open(hf_hub_download(repo_id ,filename ,repo_type='dataset' ) ,'r' ) )
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    else:
        num_classes = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        idalabel = json.load(open(hf_hub_download(repo_id ,filename ,repo_type='dataset' ) ,'r' ) )
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
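# Illustrative mapping (assumed example, not executed by the script): a timm key
# such as "layers.0.blocks.0.mlp.fc1.weight" is renamed by the rules above to
# "swinv2.encoder.layers.0.blocks.0.intermediate.dense.weight".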
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                # the fused timm qkv weight is split into separate query/key/value tensors
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()
    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    # sanity check: the converted model must reproduce the timm logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swinv2_name",
default="swinv2_tiny_patch4_window8_256",
type=str,
help="Name of the Swinv2 timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
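# Example invocation (hypothetical output path):
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256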
| 345 | 1 |
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def search(grid: list[list[int]], init: list[int], goal: list[int], cost: int, heuristic: list[list[int]]):
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
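# Minimal usage sketch (assumed inputs, separate from the demo below): on an
# obstacle-free 2x2 grid with a zero heuristic, the returned path runs from
# `init` to `goal`:
#
#   tiny_path, _ = search([[0, 0], [0, 0]], [0, 0], [1, 1], 1, [[0, 0], [0, 0]])
#   assert tiny_path[0] == [0, 0] and tiny_path[-1] == [1, 1]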
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 160 |
"""simple docstring"""
def text_justification(word: str, max_width: int) -> list:
    """
    Format a paragraph so every line is exactly max_width characters wide.

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 160 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    # helps in renaming embedding layer weights; idx = stage number in the original model
    embed = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
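# Illustrative output (assumed example): for idx == 0 the first pair appended
# above is
#   ("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
#    "stage0.patch_embed.proj.weight")
# i.e. (Hugging Face parameter name, original CvT parameter name).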
def attention(idx, cnt):
    # helps in renaming attention block weights; idx = stage, cnt = block index within the stage
    attention_weights = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    # helps in renaming cls_token weights
    token = []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', '''stage2.cls_token''') )
return token
def final():
    # helps in renaming the final classification layer
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Function to convert a Microsoft CvT checkpoint to a Hugging Face checkpoint."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
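# Example invocation (hypothetical local paths):
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#       --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name ./cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-hf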
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Path to the original CvT checkpoint file.",
    )
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path) | 702 |
"""simple docstring"""
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # a sign change between a and b is required for a root to lie in between
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
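# Sanity note (assumed, not part of the original script): 10 - x*x has its
# positive root at sqrt(10) ≈ 3.162, so bisection(0, 6) should return a value
# within the 0.01 stopping width of that root.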
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 374 | 0 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
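# Note (assumption about the tools API): `remote=True` routes calls through a
# hosted inference endpoint instead of running the classification pipeline
# locally, which is why both variants are exercised above.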
| 72 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    r"""Constructs a PoolFormer image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: int = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            # scale up before cropping so the final center crop covers crop_pct of the image
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: int = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
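    # Quick usage sketch (hypothetical values): with crop_pct=0.9 and
    # size={"shortest_edge": 224}, `resize` first scales the shorter edge to
    # int(224 / 0.9) = 248, and `center_crop` then cuts the final 224x224
    # crop_size patch from the enlarged image.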
| 575 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops_cpu(self):
        debug_launcher(test_ops.main)
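# Note (assumption about accelerate internals): debug_launcher runs the given
# main() under a small multi-process CPU launch, so these tests exercise the
# distributed code paths without needing GPUs.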
| 501 |
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex3, vertex1), depth - 1)
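# Scale note (assumed): each recursion level triples the triangle count, so a
# depth-n call draws 3**n smallest triangles inside the outer outline.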
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 501 | 1 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def UpperCamelCase__ ( lowercase__ : Dict , lowercase__ : Union[str, Any]=7 ):
snake_case : Dict = None
if token is not None:
snake_case : List[Any] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
snake_case : Union[str, Any] = "636036"
snake_case : Union[str, Any] = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
snake_case : Dict = requests.get(lowercase__ , headers=lowercase__ ).json()
return result["workflow_runs"]
def UpperCamelCase__ ( lowercase__ : List[str] ):
snake_case : str = get_daily_ci_runs(lowercase__ )
snake_case : int = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
snake_case : int = workflow_run["id"]
break
return workflow_run_id
def UpperCamelCase__ ( lowercase__ : Optional[Any] , lowercase__ : List[str] , lowercase__ : Tuple ):
snake_case : Union[str, Any] = get_last_daily_ci_runs(lowercase__ )
if workflow_run_id is not None:
snake_case : Optional[int] = get_artifacts_links(worflow_run_id=lowercase__ , token=lowercase__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
snake_case : List[Any] = artifacts_links[artifact_name]
download_artifact(
artifact_name=lowercase__ , artifact_url=lowercase__ , output_dir=lowercase__ , token=lowercase__ )
def UpperCamelCase__ ( lowercase__ : Optional[Any] , lowercase__ : str , lowercase__ : Optional[int] ):
get_last_daily_ci_artifacts(lowercase__ , lowercase__ , lowercase__ )
snake_case : List[Any] = {}
for artifact_name in artifact_names:
snake_case : Optional[int] = os.path.join(lowercase__ , F'''{artifact_name}.zip''' )
if os.path.isfile(lowercase__ ):
snake_case : Dict = {}
with zipfile.ZipFile(lowercase__ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase__ ):
# read the file
with z.open(lowercase__ ) as f:
snake_case : List[Any] = f.read().decode("UTF-8" )
return results
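# Usage sketch (hypothetical artifact name and paths):
#   token = os.environ.get("GITHUB_TOKEN")
#   reports = get_last_daily_ci_reports(["ci_results"], "ci_artifacts", token)
#   # reports maps artifact name -> {filename -> decoded file content}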
| 134 |
"""simple docstring"""
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)
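# Assumed examples: "192.168.0.23" -> True; "192.256.15.8" -> False (an octet
# above 254 fails the range check); "172.100.0.8" -> True.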
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
print(f'{ip} is a {valid_or_invalid} IP v4 address.')
| 134 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = '''    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )
    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""",
            F"""{long_class_name}LMPredictionHead""",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_unchanged_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
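    # For reference (assumed illustration of what check_copies enforces): a block
    # annotated with
    #   # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead
    # must stay identical to the referenced source (modulo declared renames such
    # as `with Bert->TestModel`); is_copy_consistent reports any drift, and
    # `overwrite=True` rewrites the drifted copy in place.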
| 706 |
def equation(x):
    """The target function whose root we search: f(x) = 10 - x^2."""
    return 10 - x * x


def bisection(a, b):
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
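# Assumed behaviour: with the 0.01 stopping width, bisection(-2, 5) needs about
# ceil(log2(7 / 0.01)) = 10 interval halvings and lands near sqrt(10) ≈ 3.16.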
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 333 | 0 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # value "3" silences TensorFlow's C++ logging if TF ends up imported
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
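# The try/except blocks above are deliberate: the script stays usable in
# environments missing torch or transformers by reporting None instead of
# crashing.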
| 368 |
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance from the current position to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
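# Note: f_cost above is the Manhattan heuristic alone (no accumulated g_cost in
# the ordering), so this is greedy best-first search; unlike A*, the returned
# path is not guaranteed to be the shortest one.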
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
if path:
for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
| 368 | 1 |
def is_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same forwards and backwards.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(123)
    False
    >>> is_palindrome(-121)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'RUCAIBox/mvp': 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # the mask token includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
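    # Minimal usage sketch (assumes the "RUCAIBox/mvp" checkpoint is reachable on the Hub):
    #   tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    #   batch = tokenizer("Summarize: MVP is a multi-task pre-trained model.", return_tensors="pt")
    #   # batch contains "input_ids" and "attention_mask", per model_input_names above.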
| 331 | 0 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"
    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
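    # Note (assumption about the processor design): speaker embeddings are stored
    # separately from the tokenizer files, which is why the save/load round-trip
    # above threads the two extra speaker_embeddings paths through the calls.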
    def test_speaker_embeddings( self ):
        '''Voice presets can be passed preloaded, as an .npz file path, or by hub name.'''
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len ),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
        }
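        # A Bark voice preset bundles three prompt arrays: a 1-D semantic prompt of length
        # seq_len plus coarse/fine codebook prompts of shape (n_codebooks, seq_len).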
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset )
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([] ) ).tolist() )

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz" )
        np.savez(tmpfilename, **voice_preset )
        inputs = processor(text=self.input_string, voice_preset=tmpfilename )
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([] ) ).tolist() )

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset )
    def test_tokenizer( self ):
        '''The processor's text encoding matches calling the tokenizer directly.'''
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string, padding="max_length", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist() )
| 46 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8 ) -> Tuple[int, int]:
    '''Divide each side by scale_factor**2 (rounding up), then scale back up by scale_factor.'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
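
# Worked example (illustrative, not in the original file): each side is divided by
# scale_factor**2 with rounding up, then multiplied back by scale_factor, e.g. with the
# default scale_factor=8: downscale_height_and_width(768, 768) -> (96, 96) and
# downscale_height_and_width(700, 700) -> (88, 88).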
def prepare_image(pil_image, w=512, h=512 ) -> torch.Tensor:
    '''Resize to (w, h), rescale RGB values to [-1, 1], and return a (1, 3, h, w) float tensor.'''
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1 )
    arr = np.array(pil_image.convert("RGB" ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
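
# Quick check (illustrative, not in the original file): an all-white 4x4 RGB image maps to a
# (1, 3, 4, 4) tensor of ones, since 255 / 127.5 - 1 == 1.0 after the rescaling above:
# prepare_image(Image.new("RGB", (4, 4), (255, 255, 255)), w=4, h=4) -> torch.ones(1, 3, 4, 4)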
class KandinskyV22Img2ImgPipeline( DiffusionPipeline ):
    def __init__( self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        '''Image-to-image decoder pipeline for Kandinsky 2.2 (UNet + DDPM scheduler + MoVQ).'''
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def get_timesteps( self, num_inference_steps, strength, device ):
        '''Trim the schedule for img2img: skip the first (1 - strength) fraction of steps.'''
        init_timestep = min(int(num_inference_steps * strength ), num_inference_steps )
        t_start = max(num_inference_steps - init_timestep, 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
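    # Worked example (illustrative): strength=0.2 with num_inference_steps=100 gives
    # init_timestep = min(int(100 * 0.2), 100) = 20 and t_start = 80, so only the last
    # 20 scheduler timesteps are denoised and most of the input image is preserved.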
    def prepare_latents( self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None ):
        '''Encode `image` with MoVQ (unless it is already a 4-channel latent) and noise it to `timestep`.'''
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}""" )
        image = image.to(device=device, dtype=dtype )
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list ) and len(generator ) != batch_size:
                raise ValueError(
                    f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                    f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
            elif isinstance(generator, list ):
                init_latents = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
                ]
                init_latents = torch.cat(init_latents, dim=0 )
            else:
                init_latents = self.movq.encode(image ).latent_dist.sample(generator )
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0 )
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype )
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep )
        latents = init_latents
        return latents
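    # Note (illustrative): scheduler.add_noise implements the forward diffusion q(x_t | x_0),
    # i.e. sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise for DDPM, so `strength`
    # controls how much of the encoded input image survives into the initial latents.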
    def enable_sequential_cpu_offload( self, gpu_id=0 ):
        '''Offload unet and movq to CPU, moving each to GPU only while its forward pass runs.'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device(f"""cuda:{gpu_id}""" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device )
    def enable_model_cpu_offload( self, gpu_id=0 ):
        '''Whole-model CPU offload: lower memory than keeping everything on GPU, faster than sequential offload.'''
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        device = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        '''Return the device the unet actually executes on (accounting for accelerate hooks).'''
        if not hasattr(self.unet, "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook" )
                and hasattr(module._hf_hook, "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, strength: float = 0.3, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ):
        '''Run Kandinsky 2.2 image-to-image: noise the input image to `strength`, then denoise it conditioned on `image_embeds`.'''
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list ):
            image_embeds = torch.cat(image_embeds, dim=0 )
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list ):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0 ).to(dtype=self.unet.dtype, device=device )
        if not isinstance(image, list ):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                f"""Input is in incorrect format: {[type(i ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
        image = torch.cat([prepare_image(i, width, height ) for i in image], dim=0 )
        image = image.to(dtype=image_embeds.dtype, device=device )
        latents = self.movq.encode(image )["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0 )
        self.scheduler.set_timesteps(num_inference_steps, device=device )
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device )
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor )
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
                # standard classifier-free guidance: push the unconditional prediction toward the conditioned one
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1 )
            if not (
                hasattr(self.scheduler.config, "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1 )
            image = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 46 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-large-en-ro": 10_24,
"facebook/nllb-200-distilled-600M": 10_24,
}
# fmt: off
__A = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) NLLB tokenizer with per-language prefix/suffix special tokens."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens = []
    suffix_tokens = []
    def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs, ):
        '''NLLB fast tokenizer; the source-language code is handled via prefix/suffix special tokens.'''
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ) -> str:
        '''The current source-language code (e.g. "eng_Latn").'''
        return self._src_lang

    @src_lang.setter
    def src_lang( self, new_src_lang ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1=None ) -> List[int]:
        '''NLLB does not use token type ids, so the mask is all zeros.'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs ):
        '''Used by the translation pipeline: encode inputs and record the forced BOS for the target language.'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
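    # Note (illustrative): downstream, `generate()` reads `forced_bos_token_id` so that the
    # first decoded token is the target-language code, steering decoding into that language.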
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = "eng_Latn" , lowerCamelCase__ = None , lowerCamelCase__ = "fra_Latn" , **lowerCamelCase__ , ) -> BatchEncoding:
'''simple docstring'''
__lowerCamelCase = src_lang
__lowerCamelCase = tgt_lang
return super().prepare_seqaseq_batch(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self, src_lang ) -> None:
        '''Reset special tokens to the source language: <lang> ... </s> (legacy: ... </s> <lang>).'''
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
    def set_tgt_lang_special_tokens( self, tgt_lang ) -> None:
        '''Reset special tokens to the target language: <lang> ... </s> (legacy: ... </s> <lang>).'''
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
    def save_vocabulary( self, save_directory, filename_prefix=None ) -> Tuple[str]:
        '''Copy the sentencepiece model file to `save_directory` (requires the slow-tokenizer files).'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        return (out_vocab_file,)
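
# Minimal self-check (illustrative, not part of the original file): mirrors the prefix/suffix
# layout installed by set_src_lang_special_tokens above, using made-up token ids.
def _mock_src_layout(token_ids, lang_code_id, eos_id, legacy_behaviour=False):
    if legacy_behaviour:
        return token_ids + [eos_id, lang_code_id]  # legacy layout: tokens </s> <lang>
    return [lang_code_id] + token_ids + [eos_id]  # current layout: <lang> tokens </s>


assert _mock_src_layout([10, 11], lang_code_id=256047, eos_id=2) == [256047, 10, 11, 2]
assert _mock_src_layout([10, 11], 256047, 2, legacy_behaviour=True) == [10, 11, 2, 256047]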
| 710 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# The split below is kept from the original script but unused: the fit uses the full dataset.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial() -> None:
    """Plot the raw data against the degree-4 polynomial fit."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Polynomial Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    print(pol_reg.predict(poly_reg.fit_transform([[5.5]])))
    # output should be 132148.43750003
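
    # Sanity sketch (illustrative, not part of the original script): degree-4 polynomial
    # features expand a scalar x into [1, x, x**2, x**3, x**4], which is what the linear
    # model above actually fits.
    print(poly_reg.transform([[2.0]]))  # -> [[ 1.  2.  4.  8. 16.]]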
| 167 | 0 |