import unittest

from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow


if is_flax_available():
    import jax

    from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
    from transformers.models.bert.modeling_flax_bert import FlaxBertModel
    from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel


@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
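# Illustrative sketch (added; not part of the original test file). The jit tests above
# follow the standard JAX pattern: wrap the forward pass in `jax.jit` so it is compiled
# once and reused, and call `block_until_ready()` so the program actually waits for the
# asynchronously dispatched computation. The same pattern on a toy function, with no
# pretrained weights involved:
def _jax_jit_pattern_demo():
    import jax
    import jax.numpy as jnp

    @jax.jit
    def forward(x):
        return jnp.tanh(x @ x.T)

    x = jnp.ones((8, 8))
    forward(x).block_until_ready()  # first call triggers compilation
    return forward(x)  # subsequent calls reuse the compiled computation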
# =============================================================================
"""Image/Text processor class for Chinese-CLIP."""

import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    r"""
    Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a Chinese-CLIP tokenizer into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forward everything to the tokenizer's `batch_decode`.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forward everything to the tokenizer's `decode`.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
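# Illustrative usage sketch (added; not part of the original module). The checkpoint
# name below is an assumption used only for the example. The processor routes `text`
# to the tokenizer and `images` to the image processor, then merges both outputs into
# a single `BatchEncoding` carrying `input_ids` and `pixel_values`.
def _chinese_clip_processor_demo(image):
    from transformers import ChineseCLIPProcessor

    processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    inputs = processor(text=["一只猫"], images=image, return_tensors="pt")
    return inputs  # contains input_ids, attention_mask, ... and pixel_values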
# =============================================================================
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" RoFormer tokenizer (backed by HuggingFace's *tokenizers* library), which uses a Jieba-based
    pre-tokenizer to first split Chinese text into words.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the normalizer if the serialized tokenizer disagrees with the
        # requested `do_lower_case` / `strip_accents` options.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # `PreTokenizer.custom(...)` cannot be pickled, so swap in a plain
        # BertPreTokenizer for serialization.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # The custom Jieba pre-tokenizer cannot be serialized to `tokenizer.json`,
        # so temporarily fall back to the standard BERT pre-tokenizer while saving.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
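# Illustrative sketch (added; not part of the original module). `PreTokenizer.custom`
# wraps a Python object that the Rust `tokenizers` backend cannot serialize, which is
# why `__getstate__` swaps in a plain `BertPreTokenizer` before pickling and
# `__setstate__` rebuilds the Jieba pre-tokenizer afterwards. A minimal round trip
# relying on that behavior:
def _roformer_pickle_roundtrip_demo(tokenizer: RoFormerTokenizerFast) -> RoFormerTokenizerFast:
    import pickle

    data = pickle.dumps(tokenizer)  # custom pre-tokenizer temporarily replaced
    restored = pickle.loads(data)  # JiebaPreTokenizer re-attached on load
    return restored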
# =============================================================================
import unittest

from parameterized import parameterized

from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer


class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = (
            "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the "
            "universe and 2) the passage of time and the length of objects can vary depending on the observer's "
            "frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, "
            'is known as the "princi'
        )
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
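# Illustrative sketch (added; not part of the original test file). The cached-decoding
# check in `LlamaModelTester.create_and_check_decoder_model_past_large_inputs` above
# verifies one invariant: running the model once over the full sequence and running it
# incrementally with `past_key_values` must produce the same outputs. The same idea in
# its simplest form, for a Hugging Face decoder-only LM already in eval mode:
def _kv_cache_equivalence_demo(model, input_ids, next_tokens):
    import torch

    full_ids = torch.cat([input_ids, next_tokens], dim=-1)
    logits_full = model(full_ids).logits[:, -next_tokens.shape[1] :]
    cache = model(input_ids, use_cache=True).past_key_values
    logits_incremental = model(next_tokens, past_key_values=cache).logits
    return torch.allclose(logits_full, logits_incremental, atol=1e-3)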
# =============================================================================
""" TensorFlow RegNet model."""

from typing import Dict, Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
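# Note (added for clarity): the explicit `ZeroPadding2D(kernel_size // 2)` followed by a
# "VALID" convolution reproduces PyTorch's symmetric `padding=kernel_size // 2`
# convention exactly. TensorFlow's padding="SAME" would not, because for stride > 1 it
# pads asymmetrically depending on the input size; this is what the Colab notebook
# referenced above verifies.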
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """
    RegNet Embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    """
    RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    """
    A RegNet stage composed of stacked layers.
    """

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self) -> Dict[str, tf.TensorSpec]:
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
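# Illustrative usage sketch (added; not part of the original module): building a small,
# randomly initialized TFRegNet from a config and running a dummy channels-first batch.
# The config values here are arbitrary small numbers chosen only for the example.
def _tf_regnet_demo():
    config = RegNetConfig(embedding_size=8, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 2])
    model = TFRegNetModel(config)
    pixel_values = tf.random.uniform((1, 3, 224, 224))  # NCHW, as the model expects
    outputs = model(pixel_values)
    return outputs.pooler_output.shape  # (1, 64, 1, 1)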
# =============================================================================
"""
Project Euler Problem 21: Amicable numbers.

Let d(n) be the sum of proper divisors of n (numbers less than n which divide
evenly into n). If d(a) = b and d(b) = a, where a != b, then a and b are an
amicable pair.

Evaluate the sum of all the amicable numbers under 10000.
"""
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n`` (excluding ``n`` itself)."""
    total = 0
    # Divisors come in pairs (i, n // i); iterating up to sqrt(n) finds both.
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Return the sum of all amicable numbers below ``n``."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
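# Worked example (added for clarity): 220 and 284 form the classic amicable pair,
# since sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so both pass
# the predicate in `solution`. Perfect numbers such as 6, where
# sum_of_divisors(6) == 6, are excluded by the `sum_of_divisors(i) != i` check.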
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
# =============================================================================
def hamming_distance(string1: str, string2: str) -> int:
    """
    Calculate the Hamming distance between two equal-length strings: the number
    of positions at which the corresponding characters differ.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# =============================================================================
def add(first: int, second: int) -> int:
    """
    Add two integers using only bitwise operators: XOR adds without carry, AND
    finds the carry bits, and the left shift moves the carry into position.

    >>> add(3, 5)
    8
    >>> add(13, 5)
    18
    """
    while second != 0:
        c = first & second  # carry bits
        first ^= second  # sum without carry
        second = c << 1  # carry shifted into place
    return first
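# Worked example (added for clarity): add(5, 3)
#   first = 0b101, second = 0b011
#   c = 0b001, first = 0b110 (6), second = 0b010 (2)
#   c = 0b010, first = 0b100 (4), second = 0b100 (4)
#   c = 0b100, first = 0b000 (0), second = 0b1000 (8)
#   c = 0b000, first = 0b1000 (8), second = 0 -> loop exits, returns 8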
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
# =============================================================================
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple

import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs

from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
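# Note (added for clarity): Tracker records, in execution order, every leaf module the
# wrapped model actually runs during one forward pass (forward hooks fire in call
# order); `parametrized` then keeps only the modules that own weights, yielding an
# ordered list of learnable ops that can be matched positionally between two models.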
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass
        using `x` as input. Under the hood we trace all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
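# Illustrative sketch (added; not part of the original script): because the transfer is
# positional over traced leaf modules, it works between models with different attribute
# names as long as they execute the same ordered sequence of learnable operations.
def _module_transfer_demo():
    src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    ModuleTransfer(src=src, dest=dest)(torch.randn(1, 3, 8, 8))
    assert torch.equal(src[0].weight, dest[0].weight)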
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for RegNet that mimics what vissl does without the classification head.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )


class NameToFromModelFuncMap(dict):
    """
    A dictionary with some additional logic to return a function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])
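    # Example (added for clarity): "regnet-x-002" -> "regnetx_002", which matches the
    # naming scheme timm uses for its RegNet checkpoints.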
def __getitem__( self , __lowerCamelCase) -> Callable[[], Tuple[nn.Module, Dict]]:
# default to timm!
if x not in self:
_A : Dict = self.convert_name_to_timm(__lowerCamelCase)
_A : List[str] = partial(lambda: (timm.create_model(__lowerCamelCase , pretrained=__lowerCamelCase).eval(), None))
else:
_A : Dict = super().__getitem__(__lowerCamelCase)
return val
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __getitem__( self , __lowerCamelCase) -> Callable[[], nn.Module]:
if "seer" in x and "in1k" not in x:
_A : Optional[Any] = RegNetModel
else:
_A : List[Any] = RegNetForImageClassification
return val
def _UpperCAmelCase (UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Tuple[str, str]] ):
for from_key, to_key in keys:
_A : int = from_state_dict[from_key].clone()
print(f"Copied key={from_key} to={to_key}" )
return to_state_dict
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : Callable[[], nn.Module] , UpperCamelCase__ : Callable[[], nn.Module] , UpperCamelCase__ : RegNetConfig , UpperCamelCase__ : Path , UpperCamelCase__ : bool = True , ):
print(f"Converting {name}..." )
with torch.no_grad():
_A , _A : int = from_model_func()
_A : Optional[Any] = our_model_func(UpperCamelCase__ ).eval()
_A : Any = ModuleTransfer(src=UpperCamelCase__ , dest=UpperCamelCase__ , raise_if_mismatch=UpperCamelCase__ )
_A : List[str] = torch.randn((1, 3, 224, 224) )
module_transfer(UpperCamelCase__ )
if from_state_dict is not None:
_A : int = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
_A : Optional[int] = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
_A : Optional[int] = manually_copy_vissl_head(UpperCamelCase__ , our_model.state_dict() , UpperCamelCase__ )
our_model.load_state_dict(UpperCamelCase__ )
_A : List[Any] = our_model(UpperCamelCase__ , output_hidden_states=UpperCamelCase__ )
_A : Optional[Any] = (
our_outputs.logits if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else our_outputs.last_hidden_state
)
_A : List[Any] = from_model(UpperCamelCase__ )
_A : List[str] = from_output[-1] if type(UpperCamelCase__ ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
_A : Optional[int] = our_outputs.hidden_states[-1]
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=UpperCamelCase__ , )
_A : List[Any] = 224 if "seer" not in name else 384
# we can use the convnext one
_A : List[str] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=UpperCamelCase__ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=UpperCamelCase__ , )
print(f"Pushed {name}" )
def _UpperCAmelCase (UpperCamelCase__ : Path , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = True ):
_A : List[str] = "imagenet-1k-id2label.json"
_A : Dict = 1000
_A : int = (1, num_labels)
_A : Any = "huggingface/label-files"
_A : int = num_labels
_A : int = json.load(open(cached_download(hf_hub_url(UpperCamelCase__ , UpperCamelCase__ , repo_type="dataset" ) ) , "r" ) )
_A : int = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_A : Optional[int] = idalabel
_A : Dict = {v: k for k, v in idalabel.items()}
_A : Union[str, Any] = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ )
_A : str = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url : str , model_func : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url , model_dir=str(save_directory ) , map_location="cpu" )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
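        # VISSL checkpoints store the backbone weights under "trunk" and any task heads under "heads"; only the trunk is loaded into the model.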
_A : Union[str, Any] = model_state_dict["trunk"]
model.load_state_dict(UpperCamelCase__ )
return model.eval(), model_state_dict["heads"]
# pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
            model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , save_directory , push_to_hub , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
                model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , config , save_directory , push_to_hub , )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
        ' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 503 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["pixel_values"]
def __init__( self , __lowerCamelCase = True , __lowerCamelCase = None , __lowerCamelCase = PIL.Image.BICUBIC , __lowerCamelCase = True , __lowerCamelCase = None , __lowerCamelCase = 1 / 2_5_5 , __lowerCamelCase = True , __lowerCamelCase = True , __lowerCamelCase = None , __lowerCamelCase = None , **__lowerCamelCase , ) -> None:
super().__init__(**__lowerCamelCase)
_A : Optional[Any] = size if size is not None else {"height": 2_5_6, "width": 2_5_6}
_A : List[Any] = get_size_dict(__lowerCamelCase)
_A : str = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
_A : int = get_size_dict(__lowerCamelCase , param_name="crop_size")
_A : str = do_resize
_A : Tuple = size
_A : int = resample
_A : int = do_center_crop
_A : Union[str, Any] = crop_size
_A : Any = do_rescale
_A : str = rescale_factor
_A : Optional[int] = do_normalize
_A : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_A : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = PIL.Image.BICUBIC , __lowerCamelCase = None , **__lowerCamelCase , ) -> np.ndarray:
_A : List[str] = get_size_dict(__lowerCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
return resize(
__lowerCamelCase , size=(size["height"], size["width"]) , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , **__lowerCamelCase , ) -> np.ndarray:
_A : Dict = get_size_dict(__lowerCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
return center_crop(__lowerCamelCase , size=(size["height"], size["width"]) , data_format=__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , **__lowerCamelCase , ) -> str:
return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , **__lowerCamelCase , ) -> np.ndarray:
return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase=None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = ChannelDimension.FIRST , **__lowerCamelCase , ) -> PIL.Image.Image:
_A : int = do_resize if do_resize is not None else self.do_resize
_A : Tuple = resample if resample is not None else self.resample
_A : str = do_center_crop if do_center_crop is not None else self.do_center_crop
_A : List[str] = do_rescale if do_rescale is not None else self.do_rescale
_A : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
_A : Any = do_normalize if do_normalize is not None else self.do_normalize
_A : Tuple = image_mean if image_mean is not None else self.image_mean
_A : Tuple = image_std if image_std is not None else self.image_std
_A : Any = size if size is not None else self.size
_A : List[Any] = get_size_dict(__lowerCamelCase)
_A : List[Any] = crop_size if crop_size is not None else self.crop_size
_A : Optional[int] = get_size_dict(__lowerCamelCase , param_name="crop_size")
_A : Any = make_list_of_images(__lowerCamelCase)
if not valid_images(__lowerCamelCase):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
_A : Dict = [to_numpy_array(__lowerCamelCase) for image in images]
if do_resize:
_A : List[str] = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase) for image in images]
if do_center_crop:
_A : Union[str, Any] = [self.center_crop(image=__lowerCamelCase , size=__lowerCamelCase) for image in images]
if do_rescale:
_A : str = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase) for image in images]
if do_normalize:
_A : Optional[int] = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase) for image in images]
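        # Put every image into the requested channel layout (channels-first by default) before batching.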
_A : Tuple = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase) for image in images]
_A : List[str] = {"pixel_values": images}
return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase)
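# Minimal usage sketch (illustrative only; this dump keeps renamed identifiers, so the
# constructor name and keyword below are assumptions based on the standard processor API):
#   processor = lowerCAmelCase__()                      # defaults: resize to 256x256, center-crop to 224x224
#   batch = processor(pil_image, return_tensors="np")   # __call__ delegates to the preprocess method above
#   batch["pixel_values"].shape                         # -> (1, 3, 224, 224)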
| 503 | 1 |
'''simple docstring'''
from __future__ import annotations
class UpperCAmelCase :
'''simple docstring'''
    def __init__( self , order ) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients( self , a_coeffs , b_coeffs ) -> None:
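        # If a_0 was omitted by the caller, prepend the conventional 1.0 so the list has order + 1 entries.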
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            msg = (
                F"""Expected a_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(a_coeffs )}"""
            )
            raise ValueError(msg )
        if len(b_coeffs ) != self.order + 1:
            msg = (
                F"""Expected b_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(b_coeffs )}"""
            )
            raise ValueError(msg )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process( self , sample ) -> float:
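        # Direct-form I difference equation (coefficients normalized by a_0):
        #   y[n] = (b_0 * x[n] + sum_{i=1..k} (b_i * x[n-i] - a_i * y[n-i])) / a_0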
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
| 179 | '''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowerCAmelCase :Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
_lowerCAmelCase :List[str] = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
_lowerCAmelCase :List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader ( path ):
'''simple docstring'''
    with open(path , 'rb' ) as f:
        im = Image.open(f )
        return im.convert('RGB' )
@dataclass
class UpperCAmelCase :
'''simple docstring'''
snake_case__ : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
} , )
snake_case__ : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
snake_case__ : Optional[str] = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "A folder containing the training data."} )
snake_case__ : Optional[str] = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "A folder containing the validation data."} )
snake_case__ : Optional[float] = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
snake_case__ : Optional[int] = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
snake_case__ : Optional[int] = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def _UpperCamelCase ( self ) -> Optional[Any]:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class UpperCAmelCase :
'''simple docstring'''
snake_case__ : str = field(
default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
snake_case__ : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_SCREAMING_SNAKE_CASE )} , )
snake_case__ : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case__ : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
snake_case__ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
snake_case__ : str = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Name or path of preprocessor config."} )
snake_case__ : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
snake_case__ : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def collate_fn ( examples ):
'''simple docstring'''
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
    labels = torch.tensor([example['labels'] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main ( ):
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_image_classification' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
        dataset = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
else:
        data_files = {}
if data_args.train_dir is not None:
            data_files['train'] = os.path.join(data_args.train_dir , '**' )
if data_args.validation_dir is not None:
            data_files['validation'] = os.path.join(data_args.validation_dir , '**' )
        dataset = load_dataset(
            'imagefolder' , data_files=data_files , cache_dir=model_args.cache_dir , task='image-classification' , )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset['train'].train_test_split(data_args.train_val_split )
        dataset['train'] = split['train']
        dataset['validation'] = split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset['train'].features['labels'].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
        size = image_processor.size['shortest_edge']
else:
        size = (image_processor.size['height'], image_processor.size['width'])
    normalize = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
    _train_transforms = Compose(
        [
            RandomResizedCrop(size ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
    _val_transforms = Compose(
        [
            Resize(size ),
            CenterCrop(size ),
ToTensor(),
normalize,
] )
    def train_transforms(example_batch ):
        example_batch['pixel_values'] = [
            _train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
        ]
        return example_batch
    def val_transforms(example_batch ):
        example_batch['pixel_values'] = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
            dataset['train'] = (
dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(a_ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
            dataset['validation'] = (
dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(a_ )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'image-classification',
'dataset': data_args.dataset_name,
'tags': ['image-classification', 'vision'],
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 179 | 1 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class __UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1 , _lowerCamelCase=False , **_lowerCamelCase ):
super().__init__(**_lowerCamelCase )
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = d_embed
lowerCAmelCase_ = d_proj
lowerCAmelCase_ = cutoffs + [vocab_size]
lowerCAmelCase_ = [0] + self.cutoffs
lowerCAmelCase_ = div_val
lowerCAmelCase_ = self.cutoffs[0]
lowerCAmelCase_ = len(self.cutoffs ) - 1
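        # The head softmax covers the shortlist vocabulary plus one routing entry per tail cluster.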
lowerCAmelCase_ = self.shortlist_size + self.n_clusters
lowerCAmelCase_ = keep_order
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def UpperCAmelCase_ ( self , _lowerCamelCase ):
if self.n_clusters > 0:
lowerCAmelCase_ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=_lowerCamelCase , name='''cluster_weight''' )
lowerCAmelCase_ = self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=_lowerCamelCase , name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
lowerCAmelCase_ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=_lowerCamelCase , name=F'''out_projs_._{i}''' , )
self.out_projs.append(_lowerCamelCase )
else:
self.out_projs.append(_lowerCamelCase )
lowerCAmelCase_ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=_lowerCamelCase , name=F'''out_layers_._{i}_._weight''' , )
lowerCAmelCase_ = self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=_lowerCamelCase , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase_ ,lowerCAmelCase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase_ = self.d_embed // (self.div_val**i)
lowerCAmelCase_ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=_lowerCamelCase , name=F'''out_projs_._{i}''' )
self.out_projs.append(_lowerCamelCase )
lowerCAmelCase_ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=_lowerCamelCase , name=F'''out_layers_._{i}_._weight''' , )
lowerCAmelCase_ = self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=_lowerCamelCase , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(_lowerCamelCase )
@staticmethod
    def _logit(x , W , b , proj=None ):
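        # Optional down-projection of the hidden states, then logits = x @ W^T + b; einsum keeps the [len, batch, ...] layout.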
        y = x
        if proj is not None:
            y = tf.einsum('''ibd,ed->ibe''' , y , proj )
        return tf.einsum('''ibd,nd->ibn''' , y , W ) + b
@staticmethod
    def _gather_logprob(logprob , target ):
        lp_size = shape_list(logprob )
        r = tf.range(lp_size[0] , dtype=target.dtype )
        idx = tf.stack([r, target] , 1 )
        return tf.gather_nd(logprob , idx )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=False ):
lowerCAmelCase_ = 0
if self.n_clusters == 0:
lowerCAmelCase_ = self._logit(_lowerCamelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
lowerCAmelCase_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=_lowerCamelCase , logits=_lowerCamelCase )
lowerCAmelCase_ = tf.nn.log_softmax(_lowerCamelCase , axis=-1 )
else:
lowerCAmelCase_ = shape_list(_lowerCamelCase )
lowerCAmelCase_ = []
lowerCAmelCase_ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
lowerCAmelCase_ ,lowerCAmelCase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
lowerCAmelCase_ = (target >= l_idx) & (target < r_idx)
lowerCAmelCase_ = tf.where(_lowerCamelCase )
lowerCAmelCase_ = tf.boolean_mask(_lowerCamelCase , _lowerCamelCase ) - l_idx
if self.div_val == 1:
lowerCAmelCase_ = self.out_layers[0][0][l_idx:r_idx]
lowerCAmelCase_ = self.out_layers[0][1][l_idx:r_idx]
else:
lowerCAmelCase_ = self.out_layers[i][0]
lowerCAmelCase_ = self.out_layers[i][1]
if i == 0:
lowerCAmelCase_ = tf.concat([cur_W, self.cluster_weight] , 0 )
lowerCAmelCase_ = tf.concat([cur_b, self.cluster_bias] , 0 )
lowerCAmelCase_ = self._logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.out_projs[0] )
lowerCAmelCase_ = tf.nn.log_softmax(_lowerCamelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
lowerCAmelCase_ = tf.boolean_mask(_lowerCamelCase , _lowerCamelCase )
lowerCAmelCase_ = self._gather_logprob(_lowerCamelCase , _lowerCamelCase )
else:
lowerCAmelCase_ = self._logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.out_projs[i] )
lowerCAmelCase_ = tf.nn.log_softmax(_lowerCamelCase )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
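                    # Chain rule in log space: log p(token) = log p(cluster i | head) + log p(token | cluster i).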
lowerCAmelCase_ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(_lowerCamelCase )
if target is not None:
lowerCAmelCase_ = tf.boolean_mask(_lowerCamelCase , _lowerCamelCase )
lowerCAmelCase_ = tf.boolean_mask(_lowerCamelCase , _lowerCamelCase )
lowerCAmelCase_ = self._gather_logprob(_lowerCamelCase , _lowerCamelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(_lowerCamelCase , -cur_logprob , shape_list(_lowerCamelCase ) )
lowerCAmelCase_ = tf.concat(_lowerCamelCase , axis=-1 )
if target is not None:
if return_mean:
lowerCAmelCase_ = tf.reduce_mean(_lowerCamelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(_lowerCamelCase )
# Log the loss as a metric (we could log arbitrary metrics,
                # including different metrics for training and inference).
self.add_metric(_lowerCamelCase , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
| 274 | '''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self , _lowerCamelCase ):
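        # Every benchmarked (batch_size, sequence_length) combination must have produced a measurement.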
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
lowerCAmelCase_ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sgugger/tiny-distilbert-classification'''
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , only_pretrain_model=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , torchscript=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , fpaa=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
lowerCAmelCase_ = AutoConfig.from_pretrained(_lowerCamelCase )
# set architectures equal to `None`
lowerCAmelCase_ = None
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_lowerCamelCase , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
lowerCAmelCase_ = AutoConfig.from_pretrained(_lowerCamelCase )
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tinier_bart'''
lowerCAmelCase_ = AutoConfig.from_pretrained(_lowerCamelCase )
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
lowerCAmelCase_ = AutoConfig.from_pretrained(_lowerCamelCase )
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tinier_bart'''
lowerCAmelCase_ = AutoConfig.from_pretrained(_lowerCamelCase )
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , save_to_csv=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowerCamelCase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_lowerCamelCase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_lowerCamelCase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_lowerCamelCase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_lowerCamelCase , '''env.csv''' ) , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''env.csv''' ) ).exists() )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_lowerCamelCase ):
self.assertTrue(hasattr(_lowerCamelCase , '''sequential''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''cumulative''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''current''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowerCamelCase , '''log.txt''' ) , log_print=_lowerCamelCase , trace_memory_line_by_line=_lowerCamelCase , multi_process=_lowerCamelCase , )
lowerCAmelCase_ = PyTorchBenchmark(_lowerCamelCase )
lowerCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_lowerCamelCase , '''log.txt''' ) ).exists() )
| 274 | 1 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative ( img ):
'''simple docstring'''
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h ):
        for j in range(pixel_v ):
            img[i][j] = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
    # show result image
    imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
| 639 |
import qiskit
def single_qubit_measure (qubits : int , classical_bits : int ) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
circuit.x(1 )
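    # Both qubits are now in |1>, so every shot should measure the state "11".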
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1_000 )
# Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"""Total count for various states are: {counts}""")
| 639 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_sew''': ['''SEW_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SEWConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_sew'''] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 633 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _UpperCAmelCase :
def _lowerCamelCase ( self ):
torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
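        # A deliberately tiny UNet configuration so these pipeline tests run quickly on CPU.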
        unet = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowerCamelCase ( self ):
torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
        image_noising_scheduler = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowerCamelCase ( self ):
A_ : str = self.get_dummy_components()
A_ : List[Any] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
A_ : Optional[Any] = self.get_dummy_inputs(a__ )
A_ : List[str] = inputs["""prompt"""]
A_ : str = inputs["""generator"""]
A_ : Tuple = inputs["""num_inference_steps"""]
A_ : Optional[int] = inputs["""output_type"""]
if "image" in inputs:
A_ : List[str] = inputs["""image"""]
else:
A_ : Optional[int] = None
if "mask_image" in inputs:
A_ : int = inputs["""mask_image"""]
else:
A_ : str = None
if "original_image" in inputs:
A_ : List[Any] = inputs["""original_image"""]
else:
A_ : int = None
A_ , A_ : Optional[int] = pipe.encode_prompt(a__ )
# inputs with prompt converted to embeddings
A_ : Optional[int] = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
A_ : str = image
if mask_image is not None:
A_ : Dict = mask_image
if original_image is not None:
A_ : Optional[int] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(a__ , a__ , a__ )
A_ : List[Any] = pipe(**a__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a__ )
A_ : Union[str, Any] = self.pipeline_class.from_pretrained(a__ )
pipe_loaded.to(a__ )
pipe_loaded.set_progress_bar_config(disable=a__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a__ , a__ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
A_ : Optional[int] = self.get_dummy_inputs(a__ )
A_ : int = inputs["""generator"""]
A_ : List[Any] = inputs["""num_inference_steps"""]
A_ : Dict = inputs["""output_type"""]
# inputs with prompt converted to embeddings
A_ : Dict = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
A_ : Any = image
if mask_image is not None:
A_ : Optional[int] = mask_image
if original_image is not None:
A_ : int = original_image
A_ : Optional[Any] = pipe_loaded(**a__ )[0]
A_ : Optional[int] = np.abs(to_np(a__ ) - to_np(a__ ) ).max()
self.assertLess(a__ , 1E-4 )
def _lowerCamelCase ( self ):
A_ : Dict = self.get_dummy_components()
A_ : Optional[Any] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
A_ : Any = self.get_dummy_inputs(a__ )
A_ : List[Any] = pipe(**a__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a__ )
A_ : List[Any] = self.pipeline_class.from_pretrained(a__ )
pipe_loaded.to(a__ )
pipe_loaded.set_progress_bar_config(disable=a__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
A_ : Optional[Any] = self.get_dummy_inputs(a__ )
A_ : Optional[Any] = pipe_loaded(**a__ )[0]
A_ : Union[str, Any] = np.abs(to_np(a__ ) - to_np(a__ ) ).max()
self.assertLess(a__ , 1E-4 )
| 569 | 0 |
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
UpperCAmelCase__ ="\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
UpperCAmelCase__ ="\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
UpperCAmelCase__ ="\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy ( preds , labels ):
"""simple docstring"""
return float((preds == labels).mean() )
def acc_and_fa ( preds , labels ):
"""simple docstring"""
    acc = simple_accuracy(preds , labels )
    fa = float(f1_score(y_true=labels , y_pred=preds ) )
return {
"accuracy": acc,
"f1": fa,
}
def pearson_and_spearman ( preds , labels ):
"""simple docstring"""
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , A_ : str , A_ : int ):
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(A_ , A_ )}
elif self.config_name == "stsb":
return pearson_and_spearman(A_ , A_ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(A_ , A_ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(A_ , A_ )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 442 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowercase = {"""input_ids""": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase,  # the expected-encoding dict defined on the long line above
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
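# Note for readers: the `forced_bos_token_id` checked above is the id M2M100 uses to
# force the first generated token to the target-language code, e.g. (sketch, model
# loading is outside the scope of this test file):
#   model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("ar"))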
| 442 | 1 |
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
UpperCAmelCase = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 88 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
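    # Note on compute_metrics above (illustrative shapes; XNLI has 3 classes):
    # the Trainer passes an EvalPrediction whose `predictions` are logits of shape
    # (num_examples, 3) and whose `label_ids` have shape (num_examples,); the returned
    # dict, e.g. {"accuracy": 0.71}, is logged under an "eval_" prefix.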
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
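# Example invocation (illustrative values):
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en \
#       --do_train --do_eval \
#       --per_device_train_batch_size 32 \
#       --output_dir /tmp/debug_xnli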
if __name__ == "__main__":
main()
| 525 | 0 |
"""simple docstring"""
from PIL import Image
def mean_threshold(image: Image) -> Image:
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
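# Usage sketch (hypothetical file names):
#   img = Image.open("photo.jpg").convert("L")  # must be a grayscale ("L" mode) image
#   mean_threshold(img).save("photo_bw.png")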
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Dict = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
image.save("""output_image_path""")
| 67 | """simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
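# Example invocation (paths are placeholders; the script file name may differ):
#   python convert_ldm_original_checkpoint.py \
#       --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm-pipeline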
| 67 | 1 |
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the set of distinct prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that all elements of an iterable are equal (or the iterable is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None
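# Worked example (Project Euler 47): run(3) finds 644, 645, 646 - the first three
# consecutive integers with three distinct prime factors each - so solution(3) == 644;
# the default solution(4) is expected to return 134043.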
if __name__ == "__main__":
print(solution())
| 535 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
"""
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 533 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
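    # Sanity check of the arithmetic above: the three accumulated gradients sum
    # element-wise to 1 - 2 - 1 = -2 and 2 + 1 + 2 = 5, matching [-2.0, 5.0].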
    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
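# Example invocation (script name may differ; defaults are shown in the argparse setup above):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path ./jukebox-1b-lyrics-converted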
| 466 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def _UpperCAmelCase ( __A : "RagExampleArguments" , __A : "ProcessingArguments" , __A : "IndexHnswArguments" , ):
######################################
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
a_ : List[str] = load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
a_ : Dict = dataset.map(__A , batched=__A , num_proc=processing_args.num_proc )
# And compute the embeddings
a_ : str = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__A )
a_ : Tuple = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
a_ : Union[str, Any] = Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
a_ : Union[str, Any] = dataset.map(
partial(__A , ctx_encoder=__A , ctx_tokenizer=__A ) , batched=__A , batch_size=processing_args.batch_size , features=__A , )
# And finally save your dataset
a_ : Tuple = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
dataset.save_to_disk(__A )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
a_ : Dict = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('''embeddings''' , custom_index=__A )
# And save the index
a_ : Optional[Any] = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
dataset.get_index('''embeddings''' ).save(__A )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = field(
default=str(Path(SCREAMING_SNAKE_CASE_ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
snake_case__ = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
snake_case__ = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
snake_case__ = field(
default=str(Path(SCREAMING_SNAKE_CASE_ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None, metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        }, )
    batch_size: int = field(
        default=16, metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        }, )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768, metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."}, )
    m: int = field(
        default=128, metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        }, )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 466 | 1 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r'^(.*)_\d+\.jpg$', stem).groups()[0]
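# Illustrative example (hypothetical filename): for "great_pyrenees_173.jpg" the
# regex strips the trailing "_173.jpg", leaving the breed label "great_pyrenees".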
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        '''simple docstring'''
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
    def __len__(self):
        '''simple docstring'''
        return len(self.file_names)
    def __getitem__(self, idx):
        '''simple docstring'''
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert('RGB')
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
# Initialize accelerator
if args.with_tracking:
lowerCAmelCase__ : List[str] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
lowerCAmelCase__ : List[str] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ : Union[str, Any] = config['lr']
lowerCAmelCase__ : str = int(config['num_epochs'] )
lowerCAmelCase__ : List[Any] = int(config['seed'] )
lowerCAmelCase__ : Optional[int] = int(config['batch_size'] )
lowerCAmelCase__ : List[str] = config['image_size']
if not isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ):
lowerCAmelCase__ : str = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
lowerCAmelCase__ : List[Any] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
lowerCAmelCase__ : List[Any] = int(args.checkpointing_steps )
else:
raise ValueError(
F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
lowerCAmelCase__ : Union[str, Any] = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
lowerCAmelCase__ : Optional[Any] = os.path.split(SCREAMING_SNAKE_CASE_ )[-1].split('.' )[0]
accelerator.init_trackers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Grab all the image filenames
lowerCAmelCase__ : List[Any] = [os.path.join(args.data_dir , SCREAMING_SNAKE_CASE_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
lowerCAmelCase__ : Optional[int] = [extract_label(SCREAMING_SNAKE_CASE_ ) for fname in file_names]
lowerCAmelCase__ : Tuple = list(set(SCREAMING_SNAKE_CASE_ ) )
id_to_label.sort()
lowerCAmelCase__ : int = {lbl: i for i, lbl in enumerate(SCREAMING_SNAKE_CASE_ )}
# Set the seed before splitting the data.
np.random.seed(SCREAMING_SNAKE_CASE_ )
torch.manual_seed(SCREAMING_SNAKE_CASE_ )
torch.cuda.manual_seed_all(SCREAMING_SNAKE_CASE_ )
# Split our filenames between train and validation
lowerCAmelCase__ : Any = np.random.permutation(len(SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ : List[str] = int(0.8 * len(SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ : str = random_perm[:cut]
lowerCAmelCase__ : Tuple = random_perm[cut:]
# For training we use a simple RandomResizedCrop
lowerCAmelCase__ : int = Compose([RandomResizedCrop(SCREAMING_SNAKE_CASE_ , scale=(0.5, 1.0) ), ToTensor()] )
lowerCAmelCase__ : Optional[Any] = PetsDataset(
[file_names[i] for i in train_split] , image_transform=SCREAMING_SNAKE_CASE_ , label_to_id=SCREAMING_SNAKE_CASE_ )
# For evaluation, we use a deterministic Resize
lowerCAmelCase__ : List[str] = Compose([Resize(SCREAMING_SNAKE_CASE_ ), ToTensor()] )
lowerCAmelCase__ : str = PetsDataset([file_names[i] for i in eval_split] , image_transform=SCREAMING_SNAKE_CASE_ , label_to_id=SCREAMING_SNAKE_CASE_ )
# Instantiate dataloaders.
lowerCAmelCase__ : Dict = DataLoader(SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , num_workers=4 )
lowerCAmelCase__ : Union[str, Any] = DataLoader(SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ : Tuple = create_model('resnet50d' , pretrained=SCREAMING_SNAKE_CASE_ , num_classes=len(SCREAMING_SNAKE_CASE_ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase__ : List[Any] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
lowerCAmelCase__ : Any = False
for param in model.get_classifier().parameters():
lowerCAmelCase__ : List[Any] = True
# We normalize the batches of images to be a bit faster.
lowerCAmelCase__ : Optional[int] = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
lowerCAmelCase__ : int = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
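    # The [None, :, None, None] indexing turns the (3,) channel statistics into a
    # (1, 3, 1, 1) tensor, so the subtraction/division later broadcasts over
    # (batch, channel, height, width) image batches.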
# Instantiate optimizer
lowerCAmelCase__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
lowerCAmelCase__ : str = OneCycleLR(optimizer=SCREAMING_SNAKE_CASE_ , max_lr=SCREAMING_SNAKE_CASE_ , epochs=SCREAMING_SNAKE_CASE_ , steps_per_epoch=len(SCREAMING_SNAKE_CASE_ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase__ : str = 0
# We also need to keep track of the starting epoch so files are named properly
lowerCAmelCase__ : List[str] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase__ : List[Any] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
lowerCAmelCase__ : str = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
lowerCAmelCase__ : str = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
lowerCAmelCase__ : Optional[int] = os.path.splitext(SCREAMING_SNAKE_CASE_ )[0]
if "epoch" in training_difference:
lowerCAmelCase__ : Union[str, Any] = int(training_difference.replace('epoch_' , '' ) ) + 1
lowerCAmelCase__ : Tuple = None
else:
lowerCAmelCase__ : Any = int(training_difference.replace('step_' , '' ) )
lowerCAmelCase__ : int = resume_step // len(SCREAMING_SNAKE_CASE_ )
resume_step -= starting_epoch * len(SCREAMING_SNAKE_CASE_ )
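            # `resume_step` starts out as a global batch counter; integer division by the
            # dataloader length recovers the epoch, and the remainder is the batch offset
            # inside that epoch.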
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
model.train()
if args.with_tracking:
lowerCAmelCase__ : Tuple = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
lowerCAmelCase__ : str = accelerator.skip_first_batches(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
lowerCAmelCase__ : List[Any] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowerCAmelCase__ : Optional[int] = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowerCAmelCase__ : Dict = (batch['image'] - mean) / std
lowerCAmelCase__ : Any = model(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = torch.nn.functional.cross_entropy(SCREAMING_SNAKE_CASE_ , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(SCREAMING_SNAKE_CASE_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Optional[Any] = F'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
lowerCAmelCase__ : Union[str, Any] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Optional[int] = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowerCAmelCase__ : Tuple = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowerCAmelCase__ : str = (batch['image'] - mean) / std
with torch.no_grad():
lowerCAmelCase__ : List[str] = model(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = outputs.argmax(dim=-1 )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = accelerator.gather_for_metrics((predictions, batch['label']) )
lowerCAmelCase__ : Optional[Any] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
lowerCAmelCase__ : List[Any] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}: {100 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(SCREAMING_SNAKE_CASE_ ),
'epoch': epoch,
} , step=SCREAMING_SNAKE_CASE_ , )
if checkpointing_steps == "epoch":
lowerCAmelCase__ : List[str] = F'''epoch_{epoch}'''
if args.output_dir is not None:
lowerCAmelCase__ : int = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument('--data_dir', required=True, help='The data folder on disk.')
    parser.add_argument('--fp16', action='store_true', help='If passed, will use FP16 training.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    parser.add_argument(
        '--checkpointing_steps', type=str, default=None, help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.', )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.', )
    parser.add_argument(
        '--with_tracking', action='store_true', help='Whether to load in all available experiment trackers from the environment and use them for logging.', )
    parser.add_argument(
        '--project_dir', type=str, default='logs', help='Location on where to store experiment tracking logs and relevant project information', )
    args = parser.parse_args()
    config = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
    training_function(config, args)
if __name__ == "__main__":
    main()
| 69 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
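# Illustrative behaviour: split_text("a b c d e", n=2) regroups every two
# whitespace-separated tokens, returning ["a b", "c d", "e"].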
def split_documents(documents: dict) -> dict:
    titles, texts = [], []
    for title, text in zip(documents['title'], documents['text']):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else '')
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    input_ids = ctx_tokenizer(
        documents['title'], documents['text'], truncation=True, padding='longest', return_tensors='pt')['input_ids']
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args, processing_args, index_hnsw_args):
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        'csv', data_files=[rag_example_args.csv_path], split='train', delimiter='\t', column_names=['title', 'text'])
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {'text': Value('string'), 'title': Value('string'), 'embeddings': Sequence(Value('float32'))})  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), batched=True, batch_size=processing_args.batch_size, features=new_features, )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, 'my_knowledge_dataset')
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index('embeddings', custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, 'my_knowledge_dataset_hnsw_index.faiss')
    dataset.get_index('embeddings').save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
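# Minimal retrieval sketch, for illustration only: the question-encoder checkpoint
# name and the helper itself are assumptions and are not executed by this script.
def _query_index_demo(dataset, question="What does Moses' rod turn into ?"):
    from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

    q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base").to(device=device)
    q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    input_ids = q_tokenizer(question, return_tensors="pt")["input_ids"].to(device=device)
    question_embedding = q_encoder(input_ids).pooler_output[0].detach().cpu().numpy()
    # `get_nearest_examples` searches the FAISS index attached to the "embeddings" column
    scores, passages = dataset.get_nearest_examples("embeddings", question_embedding, k=2)
    return scores, passages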
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv'), metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''}, )
    question: Optional[str] = field(
        default=None, metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'}, )
    rag_model_name: str = field(
        default='facebook/rag-sequence-nq', metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''}, )
    dpr_ctx_encoder_model_name: str = field(
        default='facebook/dpr-ctx_encoder-multiset-base', metadata={
            'help': (
                'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
                ' \'facebook/dpr-ctx_encoder-multiset-base\''
            )
        }, )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / 'test_run' / 'dummy-kb'), metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'}, )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None, metadata={
            'help': 'The number of processes to use to split the documents into passages. Default is single process.'
        }, )
    batch_size: int = field(
        default=16, metadata={
            'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
        }, )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768, metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'}, )
    m: int = field(
        default=128, metadata={
            'help': (
                'The number of bi-directional links created for every new element during the HNSW index construction.'
            )
        }, )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 69 | 1 |
def cramers_rule_2x2(equation1, equation2):
    '''simple docstring'''
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('Please enter a valid equation.')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)')
        else:
            raise ValueError('No solution. (Inconsistent system)')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
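# Worked example: for the system 2x + 3y = 8 and x - y = 1,
# determinant = 2 * (-1) - 1 * 3 = -5, determinant_x = 8 * (-1) - 1 * 3 = -11 and
# determinant_y = 2 * 1 - 1 * 8 = -6, so cramers_rule_2x2([2, 3, 8], [1, -1, 1])
# returns (2.2, 1.2).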
| 481 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : str = self.get_rust_tokenizer()
UpperCAmelCase_ : str = "UNwant\u00E9d,running"
UpperCAmelCase_ : Any = tokenizer.tokenize(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Dict = self.get_rust_tokenizer()
UpperCAmelCase_ : int = tokenizer.encode(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# With lower casing
UpperCAmelCase_ : int = self.get_tokenizer(do_lower_case=lowerCAmelCase_ )
UpperCAmelCase_ : str = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ )
UpperCAmelCase_ : Any = "UNwant\u00E9d,running"
UpperCAmelCase_ : Dict = tokenizer.tokenize(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Any = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : int = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
UpperCAmelCase_ : Dict = tokenizer_r.encode_plus(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
UpperCAmelCase_ : int = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , "do_lower_case" ) else False
UpperCAmelCase_ : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
UpperCAmelCase_ : Dict = ["的", "人", "有"]
UpperCAmelCase_ : Union[str, Any] = "".join(lowerCAmelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : str = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : int = False
UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : int = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : int = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase_ : Dict = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 95 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'Pix2StructImageProcessor'
    tokenizer_class = ('T5Tokenizer', 'T5TokenizerFast')
    def __init__(self, image_processor, tokenizer):
        """simple docstring"""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None
if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
return encoding_image_processor
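    # Pix2Struct is an image-to-text model, so tokenized text is decoder input:
    # the `decoder_input_ids` / `decoder_attention_mask` renaming above lets the
    # merged dict be passed straight to the model's forward/generate call.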
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 705 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3], ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
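        # With the defaults above: (32 // 2) ** 2 = 256 patches, downsampled by 4 at
        # each of the two patch-merging steps -> 256 // 4 ** 2 = 16 tokens, while the
        # hidden size doubles per stage -> 16 * 2 ** 2 = 64.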
    def create_and_check_backbone(self, config, pixel_values, labels):
        """simple docstring"""
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def __A ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
    def test_config(self):
        """simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        """simple docstring"""
        return
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("Swin does not use inputs_embeds" )
def __A ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("Swin does not support feedforward chunking" )
def __A ( self : Dict ) -> int:
"""simple docstring"""
pass
def __A ( self : Dict ) -> int:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) )
def __A ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(SCREAMING_SNAKE_CASE )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def __A ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def __A ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        """simple docstring"""
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
def __A ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __A ( self : Dict ) -> Tuple:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def __A ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def __A ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def __A ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
    def test_model_outputs_equivalence(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t
        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ), )
            recursive_check(tuple_output, dict_output)
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest ( unittest.TestCase , BackboneTesterMixin ):
"""simple docstring"""
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self):
        """simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self)
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
lowerCAmelCase = backbone_class(SCREAMING_SNAKE_CASE )
backbone.to(SCREAMING_SNAKE_CASE )
backbone.eval()
lowerCAmelCase = backbone(**SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowerCAmelCase = backbone(**SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowerCAmelCase = backbone(**SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
| 159 | 0 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """simple docstring"""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f'{test_file} instead.')
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f'`test_file` should be a python file. Got {test_fn} instead.')
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.')
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
def _lowercase ( SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
UpperCamelCase = get_module_path(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = importlib.import_module(SCREAMING_SNAKE_CASE_ )
return test_module
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = get_test_module(SCREAMING_SNAKE_CASE_ )
for attr in dir(SCREAMING_SNAKE_CASE_ ):
if attr.endswith("""ModelTester""" ):
tester_classes.append(getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x.__name__ )
def _lowercase ( SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = get_test_module(SCREAMING_SNAKE_CASE_ )
for attr in dir(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
UpperCamelCase = getattr(SCREAMING_SNAKE_CASE_ , """all_model_classes""" , [] )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
test_classes.append(SCREAMING_SNAKE_CASE_ )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x.__name__ )
def _lowercase ( SCREAMING_SNAKE_CASE_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = get_test_classes(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x.__name__ )
def _lowercase ( SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
UpperCamelCase = test_class()
if hasattr(SCREAMING_SNAKE_CASE_ , """setUp""" ):
test.setUp()
UpperCamelCase = None
if hasattr(SCREAMING_SNAKE_CASE_ , """model_tester""" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
UpperCamelCase = test.model_tester.__class__
return model_tester
def _lowercase ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase = get_test_classes(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(SCREAMING_SNAKE_CASE_ )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x.__name__ )
def _lowercase ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = get_test_classes_for_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase = []
for test_class in test_classes:
UpperCamelCase = get_model_tester_from_test_class(SCREAMING_SNAKE_CASE_ )
if tester_class is not None:
tester_classes.append(SCREAMING_SNAKE_CASE_ )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x.__name__ )
def _lowercase ( SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
UpperCamelCase = get_test_classes(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = {test_class: get_model_tester_from_test_class(SCREAMING_SNAKE_CASE_ ) for test_class in test_classes}
return test_tester_mapping
def _lowercase ( SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
UpperCamelCase = get_model_classes(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = {
model_class: get_test_classes_for_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for model_class in model_classes
}
return model_test_mapping
def _lowercase ( SCREAMING_SNAKE_CASE_ : Dict ):
"""simple docstring"""
UpperCamelCase = get_model_classes(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = {
model_class: get_tester_classes_for_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for model_class in model_classes
}
return model_to_tester_mapping
def _lowercase ( SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return o
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return o.__name__
elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ):
return [to_json(SCREAMING_SNAKE_CASE_ ) for x in o]
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return {to_json(SCREAMING_SNAKE_CASE_ ): to_json(SCREAMING_SNAKE_CASE_ ) for k, v in o.items()}
else:
return o
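# A minimal usage sketch (the test file path below is hypothetical and only
# needs to exist in a transformers checkout for this to run):
#
#   test_file = "tests/models/bert/test_modeling_bert.py"
#   print(to_json(get_model_to_tester_mapping(test_file)))
#   # e.g. {"BertModel": ["BertModelTester"], ...}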
| 386 |
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """
    Divide `number_of_bytes` into `partitions` contiguous, 1-indexed byte ranges,
    e.g. for a multi-part download. The last partition absorbs any remainder.
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not be greater than number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
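# Worked example: allocation_num(16, 4) -> ['1-4', '5-8', '9-12', '13-16'];
# the last partition absorbs any remainder, e.g. allocation_num(17, 4) ends with '13-17'.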
if __name__ == "__main__":
import doctest
doctest.testmod()
| 386 | 1 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """
    Greedy approximation of a minimum vertex cover: repeatedly pick the vertex
    with the highest remaining degree and remove its incident edges.
    """
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
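# Note: this is a greedy heuristic, not an exact minimum vertex cover. For the
# demo graph below it pops vertices in order 2, 0, 1, 4 and returns {0, 1, 2, 4}.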
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
| 437 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
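# A minimal usage sketch of this builder through the public API (the file glob
# below is hypothetical):
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train-*.parquet"})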
| 437 | 1 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100_000)]


def next_number(number: int) -> int:
    """Sum of the squares of the digits of `number`, computed via the lookup table."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89


def chain(number: int) -> bool:
    """Return True if the chain starting at `number` ends at 1, False if it ends at 89 (memoized in CHAINS)."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many chain starting points below `number` end at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
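# Worked example: starting at 2 the chain is 2 -> 4 -> 16 -> 37 -> 58 -> 89, so
# chain(2) memoizes and returns False (the 89 family), while chain(44) reaches 1
# and returns True (44 -> 32 -> 13 -> 10 -> 1).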
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 472 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    """Return the released versions of `diffusers` on PyPI, sorted ascending."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """Create the dynamic modules cache directory and add it to the Python path."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name):
    """Create a sub-package called `name` inside the dynamic modules cache."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """List the module names that `module_file` pulls in through relative imports."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """Recursively list every file needed by `module_file` through relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
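# For example, a file containing the line `from .unet import UNet` yields
# ["unet"] from get_relative_imports; duplicates are removed via `set`, so
# ordering is not guaranteed.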
def check_imports(filename):
    """Check that `filename`'s top-level imports are importable, then return its relative imports."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """Import `module_path` from the dynamic modules cache and return the class named `class_name`."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Retrieve the pipeline class inheriting from `DiffusionPipeline` defined in `loaded_module`."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(pretrained_model_name_or_path, module_file, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False):
    """Download, cache and return the local path of `module_file` from a local folder, a community pipeline on GitHub or a Hub repo."""
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}.")
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            # GitHub raw URLs are public, so no Hub token is passed here.
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False, )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f"{module_needed}.py", cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(pretrained_model_name_or_path, module_file, class_name=None, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    """Extract a class from a module file, present in the local folder or repository of a model."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
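# A minimal usage sketch (the repo id and file name below are hypothetical):
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "some-org/some-community-pipeline", "pipeline.py"
#   )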
| 472 | 1 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False, ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # Setting do_reduce_labels remaps the background class to 255
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 526 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
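# With the lazy module in place, the heavy torch-backed submodules are only
# imported when one of their attributes is first accessed, e.g.:
#
#   from transformers.models.cpmant import CpmAntConfig  # no torch import yet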
| 526 | 1 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")

class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
 | 569 |
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
logger = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
 | 670 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    """Formatter that converts Arrow data into jax arrays on a chosen device."""

    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
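    # Note: `_consolidate` only stacks a list of jax arrays when every element
    # shares the same shape and dtype; ragged columns are returned as plain lists.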
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 25 | 0 |
import sys
import turtle
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
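# e.g. get_mid((0, 0), (4, 2)) == (2.0, 1.0)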
def triangle(vertex1: tuple[float, float], vertex2: tuple[float, float], vertex3: tuple[float, float], depth: int, ) -> None:
    """Draw the triangle, then recurse into the three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 651 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
__lowerCAmelCase : str = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
__lowerCAmelCase : str = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Any = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
__lowerCAmelCase : Optional[Any] = """translation_en_to_de""" if model == T5_TINY else """summarization"""
__lowerCAmelCase : Any = f'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(lowerCAmelCase , """argv""" , lowerCAmelCase ):
run_generate()
assert Path(lowerCAmelCase ).exists()
# os.remove(Path(output_file_name))
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
self.run_eval_tester(lowerCAmelCase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
self.run_eval_tester(lowerCAmelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase : Optional[Any] ) -> Any:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
__lowerCAmelCase : List[str] = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
__lowerCAmelCase : Union[str, Any] = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
__lowerCAmelCase : List[str] = Path(self.get_auto_remove_tmp_dir() )
__lowerCAmelCase : List[str] = str(tmp_dir / """scores.json""" )
__lowerCAmelCase : Dict = str(tmp_dir / """val.target""" )
_dump_articles(lowerCAmelCase , text["""en"""] )
_dump_articles(lowerCAmelCase , text["""de"""] )
__lowerCAmelCase : List[Any] = """translation_en_to_de""" if model == T5_TINY else """summarization"""
__lowerCAmelCase : Dict = f'''
run_eval_search.py
{model}
{str(lowerCAmelCase )}
{str(lowerCAmelCase )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(lowerCAmelCase , """argv""" , lowerCAmelCase ):
with CaptureStdout() as cs:
run_search()
__lowerCAmelCase : Any = [""" num_beams | length_penalty""", model, """Best score args"""]
__lowerCAmelCase : Tuple = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(lowerCAmelCase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(lowerCAmelCase ).exists()
os.remove(Path(lowerCAmelCase ) )
| 651 | 1 |
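# The tests above drive CLI entry points by patching sys.argv; the bare
# pattern in isolation (`main` here is a hypothetical stand-in script):
import sys
from unittest.mock import patch
def main():
    return sys.argv[1:]
testargs = ["run_eval.py", "--num_beams", "2"]
with patch.object(sys, "argv", testargs):
    assert main() == ["--num_beams", "2"]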
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput( BaseOutput ):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 700 |
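# The output class above is a dataclass holding generated images plus NSFW
# flags; a rough stand-in with plain dataclasses (the real class subclasses
# BaseOutput, which additionally allows dict-style access):
from dataclasses import dataclass
from typing import List, Optional
@dataclass
class PipelineOutputSketch:
    images: List[object]
    nsfw_content_detected: Optional[List[bool]]
out = PipelineOutputSketch(images=["img0"], nsfw_content_detected=[False])
print(out.images, out.nsfw_content_detected)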
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = """gelu"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFBlenderbotModel(config=config ).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1E-3 )
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": TFBlenderbotForConditionalGeneration,
            """feature-extraction""": TFBlenderbotModel,
            """summarization""": TFBlenderbotForConditionalGeneration,
            """text2text-generation""": TFBlenderbotForConditionalGeneration,
            """translation""": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFBlenderbotModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlenderbotConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests( unittest.TestCase ):
    src_text = ["""My friends are cool but they eat too many carbs."""]
    model_name = """facebook/blenderbot-400M-distill"""
    @cached_property
    def tokenizer( self ):
        return BlenderbotTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model( self ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def test_generation_from_long_input( self ):
        model_inputs = self.tokenizer(self.src_text , return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 254 | 0 |
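# `ids_tensor` above fabricates random token ids for the tester; a small numpy
# equivalent used the same way to build test batches (a sketch, not the HF
# helper itself):
import numpy as np
def ids_tensor_np(shape, vocab_size, seed=0):
    return np.random.default_rng(seed).integers(0, vocab_size, size=shape)
input_ids = ids_tensor_np((13, 7), 99)  # batch_size=13, seq_length=7
print(input_ids.shape)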
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase_ = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def get_results(output_dir ):
    results = {}
    path = os.path.join(output_dir , '''all_results.json''' )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            results = json.load(f )
    else:
        raise ValueError(f'''can\'t find {path}''' )
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == '''cuda'''
    return is_using_cuda and is_apex_available()
UpperCamelCase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer( TestCasePlus ):
    @classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , '''default_config.yml''' )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
    @classmethod
    def tearDownClass( cls ):
        '''simple docstring'''
        shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.get_auto_remove_tmp_dir()
lowercase : List[Any] =F'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase : Optional[Any] =get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Dict =self.get_auto_remove_tmp_dir()
lowercase : Any =F'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowercase : str =get_results(_UpperCamelCase )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =self.get_auto_remove_tmp_dir()
lowercase : List[Any] =F'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowercase : List[str] =get_results(_UpperCamelCase )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
lowercase : Tuple =7 if get_gpu_count() > 1 else 2
lowercase : int =self.get_auto_remove_tmp_dir()
lowercase : List[Any] =F'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowercase : int =get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[str] =self.get_auto_remove_tmp_dir()
lowercase : Any =F'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowercase : Any =get_results(_UpperCamelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Tuple =self.get_auto_remove_tmp_dir()
lowercase : int =F'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowercase : str =get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =self.get_auto_remove_tmp_dir()
lowercase : Optional[int] =F'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowercase : Dict =get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Any =self.get_auto_remove_tmp_dir()
lowercase : Optional[int] =F'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowercase : str =get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''translation_no_trainer''' ) ) )
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[str] =logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCamelCase )
lowercase : List[Any] =self.get_auto_remove_tmp_dir()
lowercase : List[Any] =F'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
lowercase : Optional[Any] =get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Optional[Any] =self.get_auto_remove_tmp_dir()
lowercase : Optional[int] =F'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase : Dict =get_results(_UpperCamelCase )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''image_classification_no_trainer''' ) ) )
| 92 |
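# Each test above launches an example script with `accelerate launch` and then
# reads back `all_results.json`; the read-back step in isolation:
import json, os, tempfile
with tempfile.TemporaryDirectory() as tmp_dir:
    path = os.path.join(tmp_dir, "all_results.json")
    with open(path, "w") as f:
        json.dump({"eval_accuracy": 0.8}, f)
    with open(path) as f:
        results = json.load(f)
    assert results["eval_accuracy"] >= 0.75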
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "dpt"
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=3_8_4 , patch_size=1_6 , num_channels=3 , is_hybrid=False , qkv_bias=True , backbone_out_indices=[2, 5, 8, 1_1] , readout_type="project" , reassemble_factors=[4, 2, 1, 0.5] , neck_hidden_sizes=[9_6, 1_9_2, 3_8_4, 7_6_8] , fusion_hidden_size=2_5_6 , head_in_index=-1 , use_batch_norm_in_fusion_residual=False , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , semantic_loss_ignore_index=2_5_5 , semantic_classifier_dropout=0.1 , backbone_featmap_shape=[1, 1_0_2_4, 2_4, 2_4] , neck_ignore_stages=[0, 1] , backbone_config=None , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info('''Initializing the config with a `BiT` backbone.''' )
                backbone_config = {
                    '''global_padding''': '''same''',
                    '''layer_type''': '''bottleneck''',
                    '''depths''': [3, 4, 9],
                    '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
                    '''embedding_dynamic_padding''': True,
                }
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info('''Initializing the config with a `BiT` backbone.''' )
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                pass
            else:
                raise ValueError(
                    f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
return output | 39 | 0 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
A__= 'new-model'
if is_tf_available():
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
A__= NewModelConfig
@require_tf
class lowercase__ ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = "bert-base-cased"
UpperCAmelCase__ = AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
UpperCAmelCase__ = TFAutoModel.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
@slow
def _UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = "bert-base-cased"
UpperCAmelCase__ = AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
UpperCAmelCase__ = TFAutoModelForPreTraining.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
@slow
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
UpperCAmelCase__ = TFAutoModelForCausalLM.from_pretrained(__A )
UpperCAmelCase__ , UpperCAmelCase__ = TFAutoModelForCausalLM.from_pretrained(__A , output_loading_info=__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
@slow
def _UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
UpperCAmelCase__ = TFAutoModelWithLMHead.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
@slow
def _UpperCAmelCase ( self : int ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
UpperCAmelCase__ = TFAutoModelForMaskedLM.from_pretrained(__A )
UpperCAmelCase__ , UpperCAmelCase__ = TFAutoModelForMaskedLM.from_pretrained(__A , output_loading_info=__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
@slow
def _UpperCAmelCase ( self : str ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
UpperCAmelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(__A )
UpperCAmelCase__ , UpperCAmelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(__A , output_loading_info=__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
@slow
def _UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
UpperCAmelCase__ = AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
UpperCAmelCase__ = TFAutoModelForSequenceClassification.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
@slow
def _UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
UpperCAmelCase__ = AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
UpperCAmelCase__ = TFAutoModelForQuestionAnswering.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
@slow
@require_tensorflow_probability
def _UpperCAmelCase ( self : Any ):
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
UpperCAmelCase__ = AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
UpperCAmelCase__ = TFAutoModelForTableQuestionAnswering.from_pretrained(__A )
UpperCAmelCase__ , UpperCAmelCase__ = TFAutoModelForTableQuestionAnswering.from_pretrained(
__A , output_loading_info=__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
def _UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = TFAutoModelWithLMHead.from_pretrained(__A )
self.assertIsInstance(__A , __A )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__A ) , 1_44_10 )
def _UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = TFAutoModelWithLMHead.from_pretrained(__A )
self.assertIsInstance(__A , __A )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__A ) , 1_44_10 )
def _UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(__A , __A )
UpperCAmelCase__ = copy.deepcopy(model.config )
UpperCAmelCase__ = ["FunnelBaseModel"]
UpperCAmelCase__ = TFAutoModel.from_config(__A )
self.assertIsInstance(__A , __A )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__A )
UpperCAmelCase__ = TFAutoModel.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def _UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
try:
AutoConfig.register("new-model" , __A )
UpperCAmelCase__ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(__A ):
auto_class.register(__A , __A )
auto_class.register(__A , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
auto_class.register(__A , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase__ = BertModelTester(self ).get_config()
UpperCAmelCase__ = NewModelConfig(**tiny_config.to_dict() )
UpperCAmelCase__ = auto_class.from_config(__A )
self.assertIsInstance(__A , __A )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__A )
UpperCAmelCase__ = auto_class.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
__A , "bert-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase__ = TFAutoModel.from_pretrained("bert-base" )
def _UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
with self.assertRaisesRegex(
__A , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase__ = TFAutoModel.from_pretrained(__A , revision="aaaaaa" )
def _UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
with self.assertRaisesRegex(
__A , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
UpperCAmelCase__ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def _UpperCAmelCase ( self : int ):
"""simple docstring"""
with self.assertRaisesRegex(__A , "Use `from_pt=True` to load this model" ):
UpperCAmelCase__ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def _UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
UpperCAmelCase__ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
UpperCAmelCase__ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
UpperCAmelCase__ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 718 |
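# The registration test above uses the public Auto-class extension API; a
# condensed sketch (assumes TF and transformers are available; registrations
# persist for the session, and `TinyNewConfig` is a made-up name):
from transformers import AutoConfig, BertConfig, TFAutoModel, TFBertModel
class TinyNewConfig(BertConfig):
    model_type = "tiny-new-model"
AutoConfig.register("tiny-new-model", TinyNewConfig)
TFAutoModel.register(TinyNewConfig, TFBertModel)
config = TinyNewConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37)
model = TFAutoModel.from_config(config)  # dispatches to TFBertModel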
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
A = logging.get_logger(__name__)
class Conversation:
    def __init__( self : List[str] , text : str = None , conversation_id : uuid.UUID = None , past_user_inputs : Dict=None , generated_responses : Any=None ):
        """simple docstring"""
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text
    def __eq__( self : Any , other : str ):
        """simple docstring"""
        if not isinstance(other , Conversation ):
            return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input( self : Any , text : str , overwrite : bool = False ):
        """simple docstring"""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    F"""with: \"{text}\".""" )
                self.new_user_input = text
            else:
                logger.warning(
                    F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
        else:
            self.new_user_input = text
    def mark_processed( self : int ):
        """simple docstring"""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None
    def append_response( self : List[str] , response : str ):
        """simple docstring"""
        self.generated_responses.append(response )
    def iter_texts( self : List[str] ):
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : List[Any] ):
"""simple docstring"""
        output = F"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += F"""{name} >> {text} \n"""
return output
@add_end_docstrings(
__SCREAMING_SNAKE_CASE , R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class ConversationalPipeline( Pipeline ):
    def __init__( self : str , *args : Union[str, Any] , **kwargs : Any ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters( self : List[str] , min_length_for_response : int=None , minimum_tokens : Any=None , clean_up_tokenization_spaces : List[str]=None , **generate_kwargs : Dict ):
        """simple docstring"""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs )
        return preprocess_params, forward_params, postprocess_params
    def __call__( self : Dict , conversations : Union[Conversation, List[Conversation]] , num_workers : List[str]=0 , **kwargs : Optional[int] ):
        """simple docstring"""
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
    def preprocess( self : Optional[Any] , conversation : Conversation , min_length_for_response : List[Any]=32 ):
        """simple docstring"""
        if not isinstance(conversation , Conversation ):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs" )
        if conversation.new_user_input is None:
            raise ValueError(
                F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
                "Add user inputs with the conversation's `add_user_input` method" )
        if hasattr(self.tokenizer , "_build_conversation_input_ids" ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward( self : Optional[Any] , model_inputs : List[Any] , minimum_tokens : List[Any]=10 , **generate_kwargs : Optional[Any] ):
        """simple docstring"""
        max_length = generate_kwargs.get("max_length" , self.model.config.max_length )
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation" )
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess( self : Tuple , model_outputs : Union[str, Any] , clean_up_tokenization_spaces : Optional[Any]=True ):
        """simple docstring"""
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation
    def _legacy_parse_and_tokenize( self : List[str] , conversation : Conversation ):
        """simple docstring"""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 277 | 0 |
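# End-to-end use of the classes above (downloads a model, so network access
# is required; a sketch of the intended workflow):
from transformers import Conversation, pipeline
chatbot = pipeline("conversational", model="facebook/blenderbot-400M-distill")
conversation = Conversation("My friends are cool but they eat too many carbs.")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])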
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox'] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 521 |
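# The import structure above defers heavy imports until first attribute
# access; the core trick reduced to importlib (a sketch of what `_LazyModule`
# does, with a made-up class name):
import importlib
class LazyModuleSketch:
    def __init__(self, name):
        self._name = name
        self._module = None
    def __getattr__(self, attr):
        # the real module is imported only on the first attribute lookup
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)
lazy_json = LazyModuleSketch("json")
print(lazy_json.dumps({"a": 1}))  # triggers the real import here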
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ =logging.get_logger(__name__)
lowercase__ ={
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig( PretrainedConfig ):
    model_type = "roformer"
    def __init__(self , vocab_size=5_0_0_0_0 , embedding_size=None , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_5_3_6 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , rotary_value=False , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig( OnnxConfig ):
@property
    def inputs(self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 521 | 1 |
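# The ONNX input spec above maps axis index -> symbolic name so the exporter
# knows which dimensions are dynamic; the same mapping hand-rolled for the
# multiple-choice task:
from collections import OrderedDict
dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
onnx_inputs = OrderedDict(
    [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
)
print(onnx_inputs["input_ids"])  # {0: 'batch', 1: 'choice', 2: 'sequence'}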
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class Data2VecVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''data2vec-vision'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('1.11' )
@property
    def inputs( self):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
    def atol_for_validation( self):
return 1E-4
| 720 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'camembert'
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig( OnnxConfig ):
    '''simple docstring'''
@property
    def inputs( self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
| 413 | 0 |
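# Configs like the two above round-trip through JSON on disk; a generic check
# of that behaviour (assumes transformers is installed):
import tempfile
from transformers import BertConfig
config = BertConfig(hidden_size=32, num_hidden_layers=2)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    reloaded = BertConfig.from_pretrained(tmp_dir)
assert reloaded.hidden_size == 32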
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem ):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__( self , fo : str = "" , target_protocol : Optional[str] = None , target_options : Optional[dict] = None , **kwargs ):
        super().__init__(self , **kwargs )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo , mode="rb" , protocol=target_protocol , compression=self.compression , client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs" , {} ),  # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split("::" )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex("." )]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
@classmethod
    def _strip_protocol( cls , path ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path ).lstrip("/" )
    def _get_dirs( self ):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}
    def cat( self , path : str ):
        return self.file.open().read()
    def _open( self , path : str , mode : str = "rb" , block_size=None , autocommit=True , cache_options=None , **kwargs , ):
        path = self._strip_protocol(path )
        if mode != "rb":
            raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem ):
    protocol = '''bz2'''
    compression = '''bz2'''
    extension = '''.bz2'''
class GzipFileSystem(BaseCompressedFileFileSystem ):
    protocol = '''gzip'''
    compression = '''gzip'''
    extension = '''.gz'''
class UpperCamelCase_ (__A ):
__magic_name__ = '''lz4'''
__magic_name__ = '''lz4'''
__magic_name__ = '''.lz4'''
class UpperCamelCase_ (__A ):
__magic_name__ = '''xz'''
__magic_name__ = '''xz'''
__magic_name__ = '''.xz'''
class UpperCamelCase_ (__A ):
__magic_name__ = '''zstd'''
__magic_name__ = '''zstd'''
__magic_name__ = '''.zst'''
    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__
        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
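
if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module; the URL below is a
    # placeholder). The compressed remote file is decompressed transparently on read.
    fs = GzipFileSystem(fo="https://example.com/file.txt.gz")
    print(fs.cat(fs.uncompressed_name)[:100])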
| 95 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
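    # With the lazy module installed in sys.modules, importing this package is
    # cheap: the tokenization_bertweet submodule is only imported the first
    # time an attribute such as BertweetTokenizer is actually accessed.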
| 220 | 0 |
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Prepare an n-qubit GHZ (maximally entangled) state and measure it."""
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('aer_simulator')

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)

    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {quantum_entanglement(3)}''')
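    # For the 3-qubit GHZ state prepared above, the measured counts concentrate
    # (up to shot noise) on the all-zeros and all-ones bitstrings, e.g. roughly
    # {'000': 500, '111': 500} out of 1_000 shots.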
| 703 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"""This example is {label}""" for label in labels], return_tensors='pt', padding='max_length', )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
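
if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module; downloads the
    # facebook/bart-large-mnli checkpoint on first use).
    classifier = TextClassificationTool()
    print(classifier("This new API is really well documented!", ["positive", "negative"]))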
| 102 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        """Fallback shim so the module imports without PIL installed."""

        @staticmethod
        def open(*args, **kwargs):
            pass
    def load_image(_):
        """Fallback shim mirroring transformers.image_utils.load_image."""
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            'document-question-answering', model=model, tokenizer=tokenizer, image_processor=processor )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, '')))
        question = 'What is the placebo?'
        examples = [
            {
                'image': load_image(image),
                'question': question,
            },
            {
                'image': image,
                'question': question,
            },
            {
                'image': image,
                'question': question,
                'word_boxes': word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs, [
                [
                    {'score': ANY(float), 'answer': ANY(str), 'start': ANY(int), 'end': ANY(int)},
                    {'score': ANY(float), 'answer': ANY(str), 'start': ANY(int), 'end': ANY(int)},
                ]
            ]
            * 3, )
@require_torch
    @require_detectron2
@require_pytesseract
def _a ( self ):
"""simple docstring"""
a_ = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
a_ = INVOICE_URL
a_ = 'How many cats are there?'
a_ = [
{'score': 0.0_001, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
{'score': 0.0_001, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
]
a_ = dqa_pipeline(image=A_ , question=A_ , top_k=2 )
self.assertEqual(nested_simplify(A_ , decimals=4 ) , A_ )
a_ = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(nested_simplify(A_ , decimals=4 ) , A_ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
a_ = './tests/fixtures/tests_samples/COCO/000000039769.png'
a_ = dqa_pipeline(image=A_ , question=A_ , top_k=2 )
self.assertEqual(A_ , [] )
# We can optionnally pass directly the words and bounding boxes
a_ = './tests/fixtures/tests_samples/COCO/000000039769.png'
a_ = []
a_ = []
a_ = dqa_pipeline(image=A_ , question=A_ , words=A_ , boxes=A_ , top_k=2 )
self.assertEqual(A_ , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _a ( self ):
"""simple docstring"""
a_ = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
a_ = INVOICE_URL
a_ = 'What is the invoice number?'
a_ = dqa_pipeline(image=A_ , question=A_ , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.9_944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_009, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
a_ = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.9_944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_009, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
a_ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[
{'score': 0.9_944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_009, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _a ( self ):
"""simple docstring"""
a_ = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , )
a_ = INVOICE_URL
a_ = 'What is the invoice number?'
a_ = dqa_pipeline(image=A_ , question=A_ , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.9_974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_948, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
a_ = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.9_974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_948, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
a_ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[
{'score': 0.9_974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_948, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _a ( self ):
"""simple docstring"""
a_ = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=A_ )
a_ = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=A_ , revision='3dc6de3' , )
a_ = INVOICE_URL
a_ = 'What is the invoice number?'
a_ = dqa_pipeline(image=A_ , question=A_ , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
a_ = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
a_ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[
{'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 , )
a_ = list(zip(*apply_tesseract(load_image(A_ ) , A_ , '' ) ) )
# This model should also work if `image` is set to None
a_ = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _a ( self ):
"""simple docstring"""
a_ = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=A_ )
a_ = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=A_ , revision='3dc6de3' , max_seq_len=50 , )
a_ = INVOICE_URL
a_ = 'What is the invoice number?'
a_ = dqa_pipeline(image=A_ , question=A_ , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.9_999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_998, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
a_ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[
{'score': 0.9_999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_998, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
a_ = list(zip(*apply_tesseract(load_image(A_ ) , A_ , '' ) ) )
# This model should also work if `image` is set to None
a_ = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.9_999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_998, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
@slow
@require_torch
def _a ( self ):
"""simple docstring"""
a_ = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
a_ = INVOICE_URL
a_ = 'What is the invoice number?'
a_ = dqa_pipeline(image=A_ , question=A_ , top_k=2 )
self.assertEqual(nested_simplify(A_ , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def _a ( self ):
"""simple docstring"""
pass
| 536 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
super().setUp()
lowerCamelCase_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : Tuple , A_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = 'unwanted, running'
return input_text, output_text
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(A_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = tokenizer.tokenize(A_ )
lowerCamelCase_ = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
# With lower casing
lowerCamelCase_ = self.get_tokenizer(do_lower_case=A_ )
lowerCamelCase_ = self.get_rust_tokenizer(do_lower_case=A_ )
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = tokenizer.tokenize(A_ )
lowerCamelCase_ = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : str ) -> int:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : int ) -> Any:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer()
lowerCamelCase_ = 'a\n\'ll !!to?\'d of, can\'t.'
lowerCamelCase_ = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(A_ ) , A_ )
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCamelCase_ = {}
for i, token in enumerate(A_ ):
lowerCamelCase_ = i
lowerCamelCase_ = WordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def a__ ( self : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained('bert-base-uncased' )
lowerCamelCase_ = tokenizer.encode('sequence builders' , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer.encode('multi-sequence build' , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def a__ ( self : str ) -> str:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
lowerCamelCase_ = tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = ['的', '人', '有']
lowerCamelCase_ = ''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = tokenizer_p.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer_r.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(A_ )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = tokenizer_r.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer_p.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(A_ )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
| 70 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""")
        v_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 3_64 if 'coco' in model_name else 2_24
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original BLIP-2 weights into the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b')
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl')
    )
    eos_token_id = tokenizer('\n', add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
    lavis_model, model_type = model_name_to_original[model_name]

    # load original model
    print('Loading original model...')
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=lavis_model, model_type=model_type, is_eval=True, device=device )
    original_model.eval()
    print('Done!')

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
# some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('Qformer.bert'):
            key = key.replace('Qformer.bert', 'qformer')
        if "attention.self" in key:
            key = key.replace('self', 'attention')
        if "opt_proj" in key:
            key = key.replace('opt_proj', 'language_projection')
        if "t5_proj" in key:
            key = key.replace('t5_proj', 'language_projection')
        if key.startswith('opt'):
            key = key.replace('opt', 'language')
        if key.startswith('t5'):
            key = key.replace('t5', 'language')
        state_dict[key] = val
# read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(device)
    input_ids = tokenizer(['\n'], return_tensors='pt').input_ids.to(device)
# create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors='pt').pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
with torch.no_grad():
if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']}).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -1_00)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1E-4)
elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]], device=device)
else:
# cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1E-2)
print('Looks ok!' )
print('Generating a caption...' )
    prompt = ''
    input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(device)
    original_outputs = original_model.generate({'image': original_pixel_values})
    outputs = hf_model.generate(
        pixel_values, input_ids, do_sample=False, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, )
    print('Original generation:', original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text)
if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
        processor.push_to_hub(f"""nielsr/{model_name}""")
        hf_model.push_to_hub(f"""nielsr/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 711 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2_048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get('additional_special_tokens', [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
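        # Worked example: the first "real" spm piece "," (spm id 3) becomes
        # tokenizer id 3 + fairseq_offset = 4, matching the table above.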
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
return (out_vocab_file,) | 692 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1
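        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so
        # seq_length = 225 + 1 = 226 once the [CLS] token is added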
def __UpperCAmelCase ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = TFViTModel(config=snake_case_ )
__a = model(snake_case_ , training=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
__a = self.image_size // 2
__a = pixel_values[:, :, :image_size, :image_size]
__a = model(snake_case_ , interpolate_pos_encoding=snake_case_ , training=snake_case_ )
__a = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = self.type_sequence_label_size
__a = TFViTForImageClassification(snake_case_ )
__a = model(snake_case_ , labels=snake_case_ , training=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
__a = self.image_size // 2
__a = pixel_values[:, :, :image_size, :image_size]
__a = model(snake_case_ , interpolate_pos_encoding=snake_case_ , training=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a = 1
__a = TFViTForImageClassification(snake_case_ )
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
def __UpperCAmelCase ( self ):
__a = TFViTModelTester(self )
__a = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , tf.keras.layers.Layer ) )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(snake_case_ )
__a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case_ )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def __UpperCAmelCase ( self ):
__a = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(snake_case_ )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ):
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self ):
__a = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=snake_case_ , return_tensors='''tf''' )
# forward pass
__a = model(**snake_case_ )
# verify the logits
__a = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case_ )
__a = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , snake_case_ , atol=1E-4 )
| 695 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, tubelet_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.0_2, mask_ratio=0.9, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
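        # e.g. with the defaults above: (10 // 2) ** 2 = 25 patches per frame,
        # (2 // 2) * 25 = 25 tokens in total, and int(0.9 * 25) = 22 masked tokens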
def UpperCamelCase ( self : Any)-> int:
__lowerCAmelCase =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
__lowerCAmelCase =None
if self.use_labels:
__lowerCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size)
__lowerCAmelCase =self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self : Optional[int])-> List[str]:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : Optional[Any] , snake_case_ : str , snake_case_ : List[Any] , snake_case_ : Union[str, Any])-> Union[str, Any]:
__lowerCAmelCase =VideoMAEModel(config=snake_case_)
model.to(snake_case_)
model.eval()
__lowerCAmelCase =model(snake_case_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase ( self : Any , snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : Tuple)-> Optional[int]:
__lowerCAmelCase =VideoMAEForPreTraining(snake_case_)
model.to(snake_case_)
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__lowerCAmelCase =torch.ones((self.num_masks,))
__lowerCAmelCase =torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
__lowerCAmelCase =mask.expand(self.batch_size , -1).bool()
__lowerCAmelCase =model(snake_case_ , snake_case_)
# model only returns predictions for masked patches
__lowerCAmelCase =mask.sum().item()
__lowerCAmelCase =3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels))
def UpperCamelCase ( self : List[Any])-> List[str]:
__lowerCAmelCase =self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase =config_and_inputs
__lowerCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCamelCase ( self : List[Any])-> List[Any]:
__lowerCAmelCase =VideoMAEModelTester(self)
__lowerCAmelCase =ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37)
def UpperCamelCase ( self : Tuple , snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=False)-> Tuple:
__lowerCAmelCase =copy.deepcopy(snake_case_)
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__lowerCAmelCase =torch.ones((self.model_tester.num_masks,))
__lowerCAmelCase =torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
__lowerCAmelCase =mask.expand(self.model_tester.batch_size , -1).bool()
__lowerCAmelCase =bool_masked_pos.to(snake_case_)
if return_labels:
if model_class in [
*get_values(snake_case_),
]:
__lowerCAmelCase =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_)
return inputs_dict
def UpperCamelCase ( self : List[str])-> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""")
def UpperCamelCase ( self : Dict)-> str:
pass
def UpperCamelCase ( self : Union[str, Any])-> Tuple:
__lowerCAmelCase , __lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase =model_class(snake_case_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__lowerCAmelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear))
def UpperCamelCase ( self : Optional[int])-> List[str]:
__lowerCAmelCase , __lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase =model_class(snake_case_)
__lowerCAmelCase =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase =[*signature.parameters.keys()]
__lowerCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_)
def UpperCamelCase ( self : str)-> List[Any]:
__lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_)
def UpperCamelCase ( self : List[Any])-> Any:
__lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case_)
@slow
def UpperCamelCase ( self : List[Any])-> Any:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase =VideoMAEModel.from_pretrained(snake_case_)
self.assertIsNotNone(snake_case_)
def UpperCamelCase ( self : List[Any])-> List[str]:
if not self.has_attentions:
pass
else:
__lowerCAmelCase , __lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase =True
for model_class in self.all_model_classes:
__lowerCAmelCase =self.model_tester.seq_length - self.model_tester.num_masks
__lowerCAmelCase =(
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
__lowerCAmelCase =True
__lowerCAmelCase =False
__lowerCAmelCase =True
__lowerCAmelCase =model_class(snake_case_)
model.to(snake_case_)
model.eval()
with torch.no_grad():
__lowerCAmelCase =model(**self._prepare_for_class(snake_case_ , snake_case_))
__lowerCAmelCase =outputs.attentions
self.assertEqual(len(snake_case_) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase =True
__lowerCAmelCase =model_class(snake_case_)
model.to(snake_case_)
model.eval()
with torch.no_grad():
__lowerCAmelCase =model(**self._prepare_for_class(snake_case_ , snake_case_))
__lowerCAmelCase =outputs.attentions
self.assertEqual(len(snake_case_) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__lowerCAmelCase =len(snake_case_)
# Check attention is always last and order is fine
__lowerCAmelCase =True
__lowerCAmelCase =True
__lowerCAmelCase =model_class(snake_case_)
model.to(snake_case_)
model.eval()
with torch.no_grad():
__lowerCAmelCase =model(**self._prepare_for_class(snake_case_ , snake_case_))
self.assertEqual(out_len + 1 , len(snake_case_))
__lowerCAmelCase =outputs.attentions
self.assertEqual(len(snake_case_) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCamelCase ( self : Tuple)-> Any:
def check_hidden_states_output(snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : int):
__lowerCAmelCase =model_class(snake_case_)
model.to(snake_case_)
model.eval()
with torch.no_grad():
__lowerCAmelCase =model(**self._prepare_for_class(snake_case_ , snake_case_))
__lowerCAmelCase =outputs.hidden_states
__lowerCAmelCase =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(snake_case_) , snake_case_)
__lowerCAmelCase =self.model_tester.seq_length - self.model_tester.num_masks
__lowerCAmelCase =num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__lowerCAmelCase , __lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase =True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase =True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_)
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
def UpperCamelCase ( self : Optional[Any])-> int:
pass
def prepare_video( ):
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self : str)-> List[str]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5])
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self : Optional[Any])-> Tuple:
__lowerCAmelCase =VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""").to(
snake_case_)
__lowerCAmelCase =self.default_image_processor
__lowerCAmelCase =prepare_video()
__lowerCAmelCase =image_processor(snake_case_ , return_tensors="""pt""").to(snake_case_)
# forward pass
with torch.no_grad():
__lowerCAmelCase =model(**snake_case_)
# verify the logits
__lowerCAmelCase =torch.Size((1, 4_00))
self.assertEqual(outputs.logits.shape , snake_case_)
__lowerCAmelCase =torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1]).to(snake_case_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1e-4))
@slow
def UpperCamelCase ( self : str)-> str:
__lowerCAmelCase =VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""").to(snake_case_)
__lowerCAmelCase =self.default_image_processor
__lowerCAmelCase =prepare_video()
__lowerCAmelCase =image_processor(snake_case_ , return_tensors="""pt""").to(snake_case_)
# add boolean mask, indicating which patches to mask
__lowerCAmelCase =hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""")
__lowerCAmelCase =torch.load(snake_case_)
# forward pass
with torch.no_grad():
__lowerCAmelCase =model(**snake_case_)
# verify the logits
__lowerCAmelCase =torch.Size([1, 14_08, 15_36])
__lowerCAmelCase =torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=snake_case_)
self.assertEqual(outputs.logits.shape , snake_case_)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , snake_case_ , atol=1e-4))
# verify the loss (`config.norm_pix_loss` = `True`)
__lowerCAmelCase =torch.tensor([0.5_1_4_2] , device=snake_case_)
self.assertTrue(torch.allclose(outputs.loss , snake_case_ , atol=1e-4))
# verify the loss (`config.norm_pix_loss` = `False`)
__lowerCAmelCase =VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=snake_case_).to(
snake_case_)
with torch.no_grad():
__lowerCAmelCase =model(**snake_case_)
        expected_loss = torch.tensor([0.6_4_6_9] , device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1e-4))
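# --- Illustrative sketch (assumption-flagged, not from the original file): where
# the pretraining logits shape (1, 1408, 1536) checked above comes from, assuming
# the videomae-base-short setup of 224px frames, 16px patches, 16 frames and
# tubelet size 2. 1536 = 3 * 2 * 16**2 decoder labels per patch; the 1408 rows are
# the patches marked True in the downloaded bool_masked_pos (out of 1568 total).
if __name__ == "__main__":
    image_size, patch_size, num_frames, tubelet_size, channels = 224, 16, 16, 2, 3
    total_patches = (image_size // patch_size) ** 2 * (num_frames // tubelet_size)
    decoder_num_labels = channels * tubelet_size * patch_size**2
    print(total_patches, decoder_num_labels)  # 1568 1536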
| 354 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field (default=None , metadata=None):
    return field(default_factory=lambda: default , metadata=metadata)
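# --- Illustrative check of the PEP 604 note above (names here are examples, not
# part of the test suite): on Python >= 3.10 the `|` syntax and typing.Optional
# produce equal union types.
if __name__ == "__main__":
    import sys
    from typing import Optional
    if sys.version_info >= (3, 10):
        assert (int | None) == Optional[int]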
@dataclass
class _lowercase :
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
@dataclass
class _lowercase :
'''simple docstring'''
lowercase__ = 42
lowercase__ = field(default="""toto""" , metadata={"""help""": """help message"""} )
@dataclass
class _lowercase :
'''simple docstring'''
lowercase__ = False
lowercase__ = True
lowercase__ = None
class _lowercase (a_ ):
'''simple docstring'''
lowercase__ = """titi"""
lowercase__ = """toto"""
class _lowercase (a_ ):
'''simple docstring'''
lowercase__ = """titi"""
lowercase__ = """toto"""
lowercase__ = 42
@dataclass
class _lowercase :
'''simple docstring'''
lowercase__ = """toto"""
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = BasicEnum(self.foo )
@dataclass
class _lowercase :
'''simple docstring'''
lowercase__ = """toto"""
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = MixedTypeEnum(self.foo )
@dataclass
class _lowercase :
'''simple docstring'''
lowercase__ = None
lowercase__ = field(default=a_ , metadata={"""help""": """help message"""} )
lowercase__ = None
lowercase__ = list_field(default=[] )
lowercase__ = list_field(default=[] )
@dataclass
class _lowercase :
'''simple docstring'''
lowercase__ = list_field(default=[] )
lowercase__ = list_field(default=[1, 2, 3] )
lowercase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
lowercase__ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class _lowercase :
'''simple docstring'''
lowercase__ = field()
lowercase__ = field()
lowercase__ = field()
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = BasicEnum(self.required_enum )
@dataclass
class _lowercase :
'''simple docstring'''
lowercase__ = 42
lowercase__ = field()
lowercase__ = None
lowercase__ = field(default="""toto""" , metadata={"""help""": """help message"""} )
lowercase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
if is_python_no_less_than_3_10:
@dataclass
class _lowercase :
'''simple docstring'''
lowercase__ = False
lowercase__ = True
lowercase__ = None
@dataclass
class _lowercase :
'''simple docstring'''
lowercase__ = None
lowercase__ = field(default=a_ , metadata={"""help""": """help message"""} )
lowercase__ = None
lowercase__ = list_field(default=[] )
lowercase__ = list_field(default=[] )
class _lowercase (unittest.TestCase ):
'''simple docstring'''
    def argparsersEqual( self , a , b ):
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != "container"}
            yy = {k: v for k, v in vars(y ).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices" , None ) and yy.get("choices" , None ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice ) , yy["type"](expected_choice ) )
                del xx["type"], yy["type"]
            self.assertEqual(xx , yy )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = HfArgumentParser(snake_case__ )
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=snake_case__ , required=snake_case__ )
expected.add_argument("--bar" , type=snake_case__ , required=snake_case__ )
expected.add_argument("--baz" , type=snake_case__ , required=snake_case__ )
expected.add_argument("--flag" , type=snake_case__ , default=snake_case__ , const=snake_case__ , nargs="?" )
self.argparsersEqual(snake_case__ , snake_case__ )
UpperCamelCase_ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((UpperCamelCase_ ) , ) = parser.parse_args_into_dataclasses(snake_case__ , look_for_args_file=snake_case__ )
self.assertFalse(example.flag )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = HfArgumentParser(snake_case__ )
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=snake_case__ )
expected.add_argument("--baz" , default="toto" , type=snake_case__ , help="help message" )
self.argparsersEqual(snake_case__ , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=snake_case__ , default=snake_case__ , const=snake_case__ , nargs="?" )
expected.add_argument("--baz" , type=snake_case__ , default=snake_case__ , const=snake_case__ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=snake_case__ , dest="baz" )
expected.add_argument("--opt" , type=snake_case__ , default=snake_case__ )
UpperCamelCase_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(snake_case__ )
for dataclass_type in dataclass_types:
UpperCamelCase_ = HfArgumentParser(snake_case__ )
self.argparsersEqual(snake_case__ , snake_case__ )
UpperCamelCase_ = parser.parse_args([] )
self.assertEqual(snake_case__ , Namespace(foo=snake_case__ , baz=snake_case__ , opt=snake_case__ ) )
UpperCamelCase_ = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(snake_case__ , Namespace(foo=snake_case__ , baz=snake_case__ , opt=snake_case__ ) )
UpperCamelCase_ = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(snake_case__ , Namespace(foo=snake_case__ , baz=snake_case__ , opt=snake_case__ ) )
UpperCamelCase_ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(snake_case__ , Namespace(foo=snake_case__ , baz=snake_case__ , opt=snake_case__ ) )
UpperCamelCase_ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(snake_case__ , Namespace(foo=snake_case__ , baz=snake_case__ , opt=snake_case__ ) )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = HfArgumentParser(snake_case__ )
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(snake_case__ , snake_case__ )
UpperCamelCase_ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
UpperCamelCase_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
UpperCamelCase_ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
UpperCamelCase_ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
UpperCamelCase_ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
UpperCamelCase_ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _lowerCamelCase ( self ):
'''simple docstring'''
@dataclass
class _lowercase :
'''simple docstring'''
lowercase__ = """toto"""
UpperCamelCase_ = HfArgumentParser(snake_case__ )
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(snake_case__ , snake_case__ )
UpperCamelCase_ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
UpperCamelCase_ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
UpperCamelCase_ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = HfArgumentParser(snake_case__ )
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=snake_case__ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=snake_case__ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=snake_case__ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=snake_case__ )
self.argparsersEqual(snake_case__ , snake_case__ )
UpperCamelCase_ = parser.parse_args([] )
self.assertEqual(
snake_case__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
UpperCamelCase_ = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(snake_case__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=snake_case__ , type=snake_case__ )
expected.add_argument("--bar" , default=snake_case__ , type=snake_case__ , help="help message" )
expected.add_argument("--baz" , default=snake_case__ , type=snake_case__ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=snake_case__ )
expected.add_argument("--des" , nargs="+" , default=[] , type=snake_case__ )
UpperCamelCase_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(snake_case__ )
for dataclass_type in dataclass_types:
UpperCamelCase_ = HfArgumentParser(snake_case__ )
self.argparsersEqual(snake_case__ , snake_case__ )
UpperCamelCase_ = parser.parse_args([] )
self.assertEqual(snake_case__ , Namespace(foo=snake_case__ , bar=snake_case__ , baz=snake_case__ , ces=[] , des=[] ) )
UpperCamelCase_ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(snake_case__ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = HfArgumentParser(snake_case__ )
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=snake_case__ , required=snake_case__ )
expected.add_argument("--required_str" , type=snake_case__ , required=snake_case__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=snake_case__ , )
self.argparsersEqual(snake_case__ , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = HfArgumentParser(snake_case__ )
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=snake_case__ , required=snake_case__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=snake_case__ , )
expected.add_argument("--opt" , type=snake_case__ , default=snake_case__ )
expected.add_argument("--baz" , default="toto" , type=snake_case__ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=snake_case__ )
self.argparsersEqual(snake_case__ , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = HfArgumentParser(snake_case__ )
UpperCamelCase_ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
UpperCamelCase_ = parser.parse_dict(snake_case__ )[0]
UpperCamelCase_ = BasicExample(**snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = HfArgumentParser(snake_case__ )
UpperCamelCase_ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(snake_case__ , parser.parse_dict , snake_case__ , allow_extra_keys=snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = HfArgumentParser(snake_case__ )
UpperCamelCase_ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = os.path.join(snake_case__ , "temp_json" )
os.mkdir(snake_case__ )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(snake_case__ , snake_case__ )
UpperCamelCase_ = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
UpperCamelCase_ = BasicExample(**snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = HfArgumentParser(snake_case__ )
UpperCamelCase_ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = os.path.join(snake_case__ , "temp_yaml" )
os.mkdir(snake_case__ )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(snake_case__ , snake_case__ )
UpperCamelCase_ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
UpperCamelCase_ = BasicExample(**snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = HfArgumentParser(snake_case__ )
self.assertIsNotNone(snake_case__ )
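# --- Minimal usage sketch of HfArgumentParser mirroring the patterns tested
# above; the dataclass, field names and CLI values are illustrative assumptions,
# not taken from the test file.
if __name__ == "__main__":
    from dataclasses import dataclass, field
    @dataclass
    class DemoArgs:
        foo: int = 1
        baz: str = field(default="toto", metadata={"help": "help message"})
    demo_parser = HfArgumentParser(DemoArgs)
    (demo_args,) = demo_parser.parse_args_into_dataclasses(
        ["--foo", "2", "--baz", "quux"], look_for_args_file=False
    )
    assert demo_args.foo == 2 and demo_args.baz == "quux"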
| 704 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowercase (a_ , a_ , unittest.TestCase ):
'''simple docstring'''
lowercase__ = StableDiffusionXLImgaImgPipeline
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
lowercase__ = PipelineTesterMixin.required_optional_params - {"""latents"""}
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=snake_case__ , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
UpperCamelCase_ = EulerDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
torch.manual_seed(0 )
UpperCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=32 , )
UpperCamelCase_ = CLIPTextModel(snake_case__ )
UpperCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=snake_case__ )
UpperCamelCase_ = CLIPTextModelWithProjection(snake_case__ )
UpperCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=snake_case__ )
UpperCamelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.75,
}
return inputs
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = StableDiffusionXLImgaImgPipeline(**snake_case__ )
UpperCamelCase_ = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
UpperCamelCase_ = self.get_dummy_inputs(snake_case__ )
UpperCamelCase_ = sd_pipe(**snake_case__ ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase_ = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCamelCase ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def _lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _lowerCamelCase ( self ):
'''simple docstring'''
pass
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = StableDiffusionXLImgaImgPipeline(**snake_case__ )
        UpperCamelCase_ = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
# forward without prompt embeds
UpperCamelCase_ = self.get_dummy_inputs(snake_case__ )
UpperCamelCase_ = 3 * ["this is a negative prompt"]
UpperCamelCase_ = negative_prompt
UpperCamelCase_ = 3 * [inputs["prompt"]]
UpperCamelCase_ = sd_pipe(**snake_case__ )
UpperCamelCase_ = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
UpperCamelCase_ = self.get_dummy_inputs(snake_case__ )
UpperCamelCase_ = 3 * ["this is a negative prompt"]
UpperCamelCase_ = 3 * [inputs.pop("prompt" )]
(
(
UpperCamelCase_
) , (
UpperCamelCase_
) , (
UpperCamelCase_
) , (
UpperCamelCase_
) ,
) = sd_pipe.encode_prompt(snake_case__ , negative_prompt=snake_case__ )
UpperCamelCase_ = sd_pipe(
**snake_case__ , prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , pooled_prompt_embeds=snake_case__ , negative_pooled_prompt_embeds=snake_case__ , )
UpperCamelCase_ = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
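# --- Standalone sketch of the `image / 2 + 0.5` rescaling used in
# get_dummy_inputs above: tensors in [-1, 1] are mapped into the [0, 1] range
# the img2img pipeline expects. Values are illustrative.
if __name__ == "__main__":
    import torch
    x = torch.tensor([-1.0, 0.0, 1.0])
    assert torch.allclose(x / 2 + 0.5, torch.tensor([0.0, 0.5, 1.0]))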
@slow
@require_torch_gpu
class _lowercase (unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self , snake_case__ , snake_case__="cpu" , snake_case__=torch.floataa , snake_case__=0 ):
'''simple docstring'''
UpperCamelCase_ = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
UpperCamelCase_ = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 64, 64) )
UpperCamelCase_ = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
UpperCamelCase_ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCamelCase_ = self.get_inputs(snake_case__ )
UpperCamelCase_ = pipe(**snake_case__ ).images
UpperCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
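# --- Sketch of the reproducible-latents trick from get_inputs above: seeding
# numpy's RandomState makes the initial latents identical across runs, which is
# what lets the slow test compare against fixed slices. Shapes are illustrative.
if __name__ == "__main__":
    import numpy as np
    import torch
    a = torch.from_numpy(np.random.RandomState(0).standard_normal((1, 4, 8, 8)))
    b = torch.from_numpy(np.random.RandomState(0).standard_normal((1, 4, 8, 8)))
    assert torch.equal(a, b)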
| 504 | 0 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method( coefficient_matrix , constant_matrix , init_val , iterations ) -> list[float]:
    rows_a, cols_a = coefficient_matrix.shape
    rows_b, cols_b = constant_matrix.shape
    if rows_a != cols_a:
        msg = F'Coefficient matrix dimensions must be nxn but received {rows_a}x{cols_a}'
        raise ValueError(msg )
    if cols_b != 1:
        msg = F'Constant matrix must be nx1 but received {rows_b}x{cols_b}'
        raise ValueError(msg )
    if rows_a != rows_b:
        msg = (
            """Coefficient and constant matrices dimensions must be nxn and nx1 but """
            F'received {rows_a}x{cols_a} and {rows_b}x{cols_b}'
        )
        raise ValueError(msg )
    if len(init_val ) != rows_a:
        msg = (
            """Number of initial values must be equal to number of rows in coefficient """
            F'matrix but received {len(init_val )} and {rows_a}'
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("""Iterations must be at least 1""" )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant( table ) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
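# --- Illustrative usage of the functions above on a small strictly diagonally
# dominant system (coefficients chosen for this example, not from the original):
#     4x +  y =  2
#      x + 3y = -6
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[2.0], [-6.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5], 50))
    # converges towards x = 12/11 ≈ 1.0909, y = -26/11 ≈ -2.3636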
| 12 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 433 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits( self , batch_size : int , length : int ):
        scores = jnp.ones((batch_size, length) ) / length
        return scores
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : int = None
__lowerCAmelCase : Dict = 20
__lowerCAmelCase : Union[str, Any] = self._get_uniform_logits(batch_size=2 , length=lowerCAmelCase )
# tweak scores to not be uniform anymore
__lowerCAmelCase : Optional[Any] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__lowerCAmelCase : List[str] = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__lowerCAmelCase : Any = jax.nn.softmax(lowerCAmelCase , axis=-1 )
__lowerCAmelCase : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCAmelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=1.3 )
__lowerCAmelCase : Tuple = jax.nn.softmax(temp_dist_warper_sharper(lowerCAmelCase , scores.copy() , cur_len=lowerCAmelCase ) , axis=-1 )
__lowerCAmelCase : str = jax.nn.softmax(temp_dist_warper_smoother(lowerCAmelCase , scores.copy() , cur_len=lowerCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
"""simple docstring"""
__lowerCAmelCase : Any = None
__lowerCAmelCase : List[Any] = 10
__lowerCAmelCase : List[Any] = 2
# create ramp distribution
__lowerCAmelCase : List[Any] = np.broadcast_to(np.arange(lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
__lowerCAmelCase : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
__lowerCAmelCase : Dict = FlaxTopKLogitsWarper(3 )
__lowerCAmelCase : Dict = top_k_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__lowerCAmelCase : int = 5
__lowerCAmelCase : Optional[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__lowerCAmelCase : Tuple = np.broadcast_to(np.arange(lowerCAmelCase )[None, :] , (batch_size, length) ).copy()
__lowerCAmelCase : List[Any] = top_k_warp_safety_check(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Any = None
__lowerCAmelCase : Optional[int] = 10
__lowerCAmelCase : Union[str, Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__lowerCAmelCase : Union[str, Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__lowerCAmelCase : str = FlaxTopPLogitsWarper(0.8 )
__lowerCAmelCase : List[Any] = np.exp(top_p_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__lowerCAmelCase : List[str] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
__lowerCAmelCase : List[str] = np.broadcast_to(np.arange(lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__lowerCAmelCase : int = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
__lowerCAmelCase : List[Any] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__lowerCAmelCase : Any = top_p_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : int = 20
__lowerCAmelCase : Dict = 4
__lowerCAmelCase : Dict = 0
__lowerCAmelCase : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase )
# check that min length is applied at length 5
__lowerCAmelCase : int = ids_tensor((batch_size, 20) , vocab_size=20 )
__lowerCAmelCase : Union[str, Any] = 5
__lowerCAmelCase : List[Any] = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Any = min_dist_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
__lowerCAmelCase : Tuple = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Optional[int] = 15
__lowerCAmelCase : int = min_dist_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
self.assertFalse(jnp.isinf(lowerCAmelCase ).any() )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = 20
__lowerCAmelCase : int = 4
__lowerCAmelCase : Union[str, Any] = 0
__lowerCAmelCase : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
__lowerCAmelCase : Tuple = ids_tensor((batch_size, 1) , vocab_size=20 )
__lowerCAmelCase : Optional[int] = 1
__lowerCAmelCase : Tuple = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Tuple = logits_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
__lowerCAmelCase : int = 3
__lowerCAmelCase : Union[str, Any] = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[Any] = logits_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
self.assertFalse(jnp.isinf(lowerCAmelCase ).any() )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = 20
__lowerCAmelCase : str = 4
__lowerCAmelCase : int = 0
__lowerCAmelCase : List[str] = 5
__lowerCAmelCase : int = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase , eos_token_id=lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
__lowerCAmelCase : List[str] = ids_tensor((batch_size, 4) , vocab_size=20 )
__lowerCAmelCase : Union[str, Any] = 4
__lowerCAmelCase : Any = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = logits_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__lowerCAmelCase : int = 3
__lowerCAmelCase : Tuple = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Tuple = logits_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
self.assertFalse(jnp.isinf(lowerCAmelCase ).any() )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = 4
__lowerCAmelCase : Union[str, Any] = 10
__lowerCAmelCase : Dict = 15
__lowerCAmelCase : List[Any] = 2
__lowerCAmelCase : Optional[Any] = 1
__lowerCAmelCase : List[Any] = 15
# dummy input_ids and scores
__lowerCAmelCase : Tuple = ids_tensor((batch_size, sequence_length) , lowerCAmelCase )
__lowerCAmelCase : int = input_ids.copy()
__lowerCAmelCase : Optional[int] = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : int = scores.copy()
# instantiate all dist processors
__lowerCAmelCase : List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCAmelCase : List[Any] = FlaxTopKLogitsWarper(3 )
__lowerCAmelCase : List[str] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowerCAmelCase : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase )
__lowerCAmelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase )
__lowerCAmelCase : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase , eos_token_id=lowerCAmelCase )
__lowerCAmelCase : int = 10
# no processor list
__lowerCAmelCase : List[str] = temp_dist_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
__lowerCAmelCase : Dict = top_k_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
__lowerCAmelCase : str = top_p_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
__lowerCAmelCase : int = min_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = bos_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
__lowerCAmelCase : int = eos_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
# with processor list
__lowerCAmelCase : str = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowerCAmelCase : List[str] = processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : str = 4
__lowerCAmelCase : List[Any] = 10
__lowerCAmelCase : str = 15
__lowerCAmelCase : List[Any] = 2
__lowerCAmelCase : Optional[Any] = 1
__lowerCAmelCase : Union[str, Any] = 15
# dummy input_ids and scores
__lowerCAmelCase : str = ids_tensor((batch_size, sequence_length) , lowerCAmelCase )
__lowerCAmelCase : List[Any] = input_ids.copy()
__lowerCAmelCase : int = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[str] = scores.copy()
# instantiate all dist processors
__lowerCAmelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCAmelCase : Optional[Any] = FlaxTopKLogitsWarper(3 )
__lowerCAmelCase : Optional[Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowerCAmelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase )
__lowerCAmelCase : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase )
__lowerCAmelCase : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase , eos_token_id=lowerCAmelCase )
__lowerCAmelCase : str = 10
# no processor list
def run_no_processor_list(lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str ):
__lowerCAmelCase : List[Any] = temp_dist_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
__lowerCAmelCase : List[str] = top_k_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = top_p_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
__lowerCAmelCase : Dict = min_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
__lowerCAmelCase : Dict = bos_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
__lowerCAmelCase : Tuple = eos_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
return scores
# with processor list
def run_processor_list(lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ):
__lowerCAmelCase : Dict = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowerCAmelCase : Tuple = processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
return scores
__lowerCAmelCase : int = jax.jit(lowerCAmelCase )
__lowerCAmelCase : Optional[int] = jax.jit(lowerCAmelCase )
__lowerCAmelCase : Optional[int] = jitted_run_no_processor_list(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Any = jitted_run_processor_list(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
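# --- Minimal sketch (requires flax) of chaining the warpers tested above with
# FlaxLogitsProcessorList outside a test class; shapes and values illustrative.
if __name__ == "__main__":
    import jax.numpy as jnp
    from transformers.generation import (
        FlaxLogitsProcessorList,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
    )
    demo_scores = jnp.ones((2, 10)) / 10
    demo_input_ids = jnp.zeros((2, 5), dtype="i4")
    chain = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(3)]
    )
    print(chain(demo_input_ids, demo_scores, cur_len=5).shape)  # (2, 10)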
| 720 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_aaa_movies (__A : str = "" ) -> dict[str, float]:
    url = __A or """https://www.imdb.com/chart/top/?ref_=nv_mv_250"""
    soup = BeautifulSoup(requests.get(url ).text , """html.parser""" )
    titles = soup.find_all("""td""" , attrs="""titleColumn""" )
    ratings = soup.find_all("""td""" , class_="""ratingColumn imdbRating""" )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }
def write_movies (__A : str = "IMDb_Top_250_Movies.csv" ) -> None:
    movies = get_imdb_top_aaa_movies()
    with open(__A , """w""" , newline="""""" ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(["""Movie title""", """IMDb rating"""] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 218 | 0 |
"""simple docstring"""
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class NezhaConfig( PretrainedConfig ):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = '''nezha'''
    def __init__( self , vocab_size=21_128 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
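# --- Illustrative instantiation of the config above (only meaningful when this
# module is imported as part of transformers, since it uses a relative import):
if __name__ == "__main__":
    demo_config = NezhaConfig()
    print(demo_config.hidden_size, demo_config.max_relative_position)  # 768 64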
| 554 |
"""simple docstring"""
def solution (n = 60_08_51_47_51_43 )-> int:
    """Return the largest prime factor of n (trial division, keeping the last factor)."""
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(F'''{solution() = }''')
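# --- Worked example for solution() above: 13195 = 5 * 7 * 13 * 29, so its
# largest prime factor is 29 (the classic Project Euler warm-up case).
if __name__ == "__main__":
    assert solution(13_195) == 29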
| 554 | 1 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = '''▁'''
_lowerCamelCase : Optional[int] = {'''vocab_file''': '''prophetnet.tokenizer'''}
_lowerCamelCase : str = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
_lowerCamelCase : Optional[Any] = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
_lowerCamelCase : Optional[Any] = {
'''microsoft/xprophetnet-large-wiki100-cased''': 5_12,
}
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = collections.OrderedDict()
with open(UpperCAmelCase__ , "r" , encoding="utf-8" ) as reader:
SCREAMING_SNAKE_CASE = reader.readlines()
for index, token in enumerate(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = token.rstrip("\n" )
SCREAMING_SNAKE_CASE = index
return vocab
class lowercase ( a ):
lowercase__ : Optional[int] = VOCAB_FILES_NAMES
lowercase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Any = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : Dict="[SEP]" , _UpperCamelCase : Tuple="[UNK]" , _UpperCamelCase : Dict="[PAD]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , sep_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
SCREAMING_SNAKE_CASE = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
SCREAMING_SNAKE_CASE = F"[unused{i}]"
SCREAMING_SNAKE_CASE = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(_UpperCamelCase )
def __getstate__( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : List[Any] , _UpperCamelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __snake_case( self : Dict , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def __snake_case( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __snake_case( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def __snake_case( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case( self : Union[str, Any] , _UpperCamelCase : str ) -> str:
'''simple docstring'''
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[Any] ) -> List[str]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(_UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __snake_case( self : str , _UpperCamelCase : str ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __snake_case( self : List[str] , _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "".join(_UpperCamelCase ).replace(_UpperCamelCase , " " ).strip()
return out_string
def __snake_case( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
_UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , "wb" ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
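# A minimal usage sketch (hypothetical names: assumes the class above is exported
# as SPMTokenizer, that "spiece.model" is a trained SentencePiece model file, and
# that the methods follow the usual slow-tokenizer API):
#   tokenizer = SPMTokenizer("spiece.model")
#   pieces = tokenizer._tokenize("Hello world")                # SentencePiece pieces
#   ids = [tokenizer._convert_token_to_id(p) for p in pieces]  # applies the fairseq offset
#   tokenizer.build_inputs_with_special_tokens(ids)            # appends a single [SEP]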
| 707 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Optional[int] = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
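# Illustrative note on the wiring above: at import time only _import_structure
# (a dict of strings) is built; the heavy submodules load on first attribute
# access through _LazyModule. For example, assuming this file is
# transformers/models/blenderbot/__init__.py:
#   from transformers.models.blenderbot import BlenderbotConfig
# imports configuration_blenderbot only at that point, keeping base imports cheap.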
| 647 | 0 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class UpperCAmelCase ( a_ , a_ ):
"""simple docstring"""
@register_to_config
def __init__( self , _snake_case = 128 , _snake_case = 256 , _snake_case = 2_000.0 , _snake_case = 768 , _snake_case = 12 , _snake_case = 12 , _snake_case = 64 , _snake_case = 2048 , _snake_case = 0.1 , ) -> Tuple:
super().__init__()
_UpperCamelCase : Optional[Any] = nn.Sequential(
nn.Linear(_snake_case , d_model * 4 , bias=_snake_case ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_snake_case ) , nn.SiLU() , )
_UpperCamelCase : List[Any] = nn.Embedding(_snake_case , _snake_case )
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Optional[int] = nn.Linear(_snake_case , _snake_case , bias=_snake_case )
_UpperCamelCase : Optional[int] = nn.Dropout(p=_snake_case )
_UpperCamelCase : int = nn.ModuleList()
for lyr_num in range(_snake_case ):
# FiLM conditional T5 decoder
_UpperCamelCase : Tuple = DecoderLayer(d_model=_snake_case , d_kv=_snake_case , num_heads=_snake_case , d_ff=_snake_case , dropout_rate=_snake_case )
self.decoders.append(_snake_case )
_UpperCamelCase : List[str] = TaLayerNorm(_snake_case )
_UpperCamelCase : List[str] = nn.Dropout(p=_snake_case )
_UpperCamelCase : Any = nn.Linear(_snake_case , _snake_case , bias=_snake_case )
def _lowercase ( self , _snake_case , _snake_case ) -> Union[str, Any]:
_UpperCamelCase : Any = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
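# Shape note for the mask above: query_input (B, Q) and key_input (B, K) are
# expanded to (B, Q, 1) and (B, 1, K); their product broadcasts to (B, Q, K),
# and unsqueeze(-3) yields (B, 1, Q, K) so the mask broadcasts over attention heads.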
def _lowercase ( self , _snake_case , _snake_case , _snake_case ) -> str:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_UpperCamelCase : Dict = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_UpperCamelCase : Any = self.conditioning_emb(_snake_case ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_UpperCamelCase : Optional[Any] = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_UpperCamelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_snake_case , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_UpperCamelCase : Union[str, Any] = self.position_encoding(_snake_case )
_UpperCamelCase : List[Any] = self.continuous_inputs_projection(_snake_case )
inputs += position_encodings
_UpperCamelCase : Tuple = self.dropout(_snake_case )
# decoder: No padding present.
_UpperCamelCase : Optional[int] = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_UpperCamelCase : Optional[int] = [(x, self.encoder_decoder_mask(_snake_case , _snake_case )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_UpperCamelCase : Optional[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_UpperCamelCase : str = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_UpperCamelCase : Optional[int] = lyr(
_snake_case , conditioning_emb=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , )[0]
_UpperCamelCase : List[str] = self.decoder_norm(_snake_case )
_UpperCamelCase : int = self.post_dropout(_snake_case )
_UpperCamelCase : Dict = self.spec_out(_snake_case )
return spec_out
class UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=1E-6 ) -> str:
super().__init__()
_UpperCamelCase : int = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_snake_case , d_kv=_snake_case , num_heads=_snake_case , dropout_rate=_snake_case ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_snake_case , d_kv=_snake_case , num_heads=_snake_case , dropout_rate=_snake_case , layer_norm_epsilon=_snake_case , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_snake_case , d_ff=_snake_case , dropout_rate=_snake_case , layer_norm_epsilon=_snake_case ) )
def _lowercase ( self , _snake_case , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , ) -> Optional[Any]:
_UpperCamelCase : int = self.layer[0](
_snake_case , conditioning_emb=_snake_case , attention_mask=_snake_case , )
if encoder_hidden_states is not None:
_UpperCamelCase : Optional[int] = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_UpperCamelCase : int = self.layer[1](
_snake_case , key_value_states=_snake_case , attention_mask=_snake_case , )
# Apply Film Conditional Feed Forward layer
_UpperCamelCase : Dict = self.layer[-1](_snake_case , _snake_case )
return (hidden_states,)
class UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case ) -> List[Any]:
super().__init__()
_UpperCamelCase : Any = TaLayerNorm(_snake_case )
_UpperCamelCase : int = TaFiLMLayer(in_features=d_model * 4 , out_features=_snake_case )
_UpperCamelCase : Dict = Attention(query_dim=_snake_case , heads=_snake_case , dim_head=_snake_case , out_bias=_snake_case , scale_qk=_snake_case )
_UpperCamelCase : List[Any] = nn.Dropout(_snake_case )
def _lowercase ( self , _snake_case , _snake_case=None , _snake_case=None , ) -> List[str]:
# pre_self_attention_layer_norm
_UpperCamelCase : Optional[Any] = self.layer_norm(_snake_case )
if conditioning_emb is not None:
_UpperCamelCase : Optional[int] = self.FiLMLayer(_snake_case , _snake_case )
# Self-attention block
_UpperCamelCase : str = self.attention(_snake_case )
_UpperCamelCase : List[Any] = hidden_states + self.dropout(_snake_case )
return hidden_states
class UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> List[str]:
super().__init__()
_UpperCamelCase : Tuple = Attention(query_dim=_snake_case , heads=_snake_case , dim_head=_snake_case , out_bias=_snake_case , scale_qk=_snake_case )
_UpperCamelCase : str = TaLayerNorm(_snake_case , eps=_snake_case )
_UpperCamelCase : Tuple = nn.Dropout(_snake_case )
def _lowercase ( self , _snake_case , _snake_case=None , _snake_case=None , ) -> List[Any]:
_UpperCamelCase : Tuple = self.layer_norm(_snake_case )
_UpperCamelCase : Tuple = self.attention(
_snake_case , encoder_hidden_states=_snake_case , attention_mask=attention_mask.squeeze(1 ) , )
_UpperCamelCase : Union[str, Any] = hidden_states + self.dropout(_snake_case )
return layer_output
class UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case ) -> Any:
super().__init__()
_UpperCamelCase : int = TaDenseGatedActDense(d_model=_snake_case , d_ff=_snake_case , dropout_rate=_snake_case )
_UpperCamelCase : Union[str, Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_snake_case )
_UpperCamelCase : Any = TaLayerNorm(_snake_case , eps=_snake_case )
_UpperCamelCase : Dict = nn.Dropout(_snake_case )
def _lowercase ( self , _snake_case , _snake_case=None ) -> Any:
_UpperCamelCase : List[Any] = self.layer_norm(_snake_case )
if conditioning_emb is not None:
_UpperCamelCase : List[str] = self.film(_snake_case , _snake_case )
_UpperCamelCase : Union[str, Any] = self.DenseReluDense(_snake_case )
_UpperCamelCase : Any = hidden_states + self.dropout(_snake_case )
return hidden_states
class UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _snake_case , _snake_case , _snake_case ) -> Dict:
super().__init__()
_UpperCamelCase : Tuple = nn.Linear(_snake_case , _snake_case , bias=_snake_case )
_UpperCamelCase : int = nn.Linear(_snake_case , _snake_case , bias=_snake_case )
_UpperCamelCase : Dict = nn.Linear(_snake_case , _snake_case , bias=_snake_case )
_UpperCamelCase : int = nn.Dropout(_snake_case )
_UpperCamelCase : Optional[int] = NewGELUActivation()
def _lowercase ( self , _snake_case ) -> Dict:
_UpperCamelCase : int = self.act(self.wi_a(_snake_case ) )
_UpperCamelCase : List[Any] = self.wi_a(_snake_case )
_UpperCamelCase : Any = hidden_gelu * hidden_linear
_UpperCamelCase : Optional[Any] = self.dropout(_snake_case )
_UpperCamelCase : Tuple = self.wo(_snake_case )
return hidden_states
class UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _snake_case , _snake_case=1E-6 ) -> Optional[Any]:
super().__init__()
_UpperCamelCase : Dict = nn.Parameter(torch.ones(_snake_case ) )
_UpperCamelCase : int = eps
def _lowercase ( self , _snake_case ) -> int:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_UpperCamelCase : List[str] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=_snake_case )
_UpperCamelCase : Any = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_UpperCamelCase : Any = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def _lowercase ( self , _snake_case ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(_snake_case , 3.0 )) ))
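# Sanity check for the tanh-approximate GELU above: at input 0 it returns
# 0.5 * 0 * (1 + tanh(0)) = 0, and for large positive inputs tanh(.) -> 1,
# so the activation approaches the identity function.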
class UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _snake_case , _snake_case ) -> str:
super().__init__()
_UpperCamelCase : Tuple = nn.Linear(_snake_case , out_features * 2 , bias=_snake_case )
def _lowercase ( self , _snake_case , _snake_case ) -> Optional[int]:
_UpperCamelCase : Tuple = self.scale_bias(_snake_case )
_UpperCamelCase, _UpperCamelCase : Any = torch.chunk(_snake_case , 2 , -1 )
_UpperCamelCase : str = x * (1 + scale) + shift
return x
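# Worked example of the FiLM rule above, x * (1 + scale) + shift, with toy
# values (illustrative only, not from a trained model):
#   x = 2.0, scale = 0.5, shift = -1.0  ->  2.0 * 1.5 - 1.0 = 2.0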
| 683 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case = None ) -> Optional[int]:
_UpperCamelCase : int = value
_UpperCamelCase : Node | None = None # Added in order to make deleting a node easier
_UpperCamelCase : Node | None = None
_UpperCamelCase : Node | None = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case = None ) -> List[Any]:
_UpperCamelCase : str = root
def __str__( self ) -> str:
return str(self.root )
def _lowercase ( self , _snake_case , _snake_case ) -> None:
if new_children is not None: # reset its kids
_UpperCamelCase : Union[str, Any] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(_snake_case ): # If it is the right child
_UpperCamelCase : str = new_children
else:
_UpperCamelCase : Any = new_children
else:
_UpperCamelCase : Any = new_children
def _lowercase ( self , _snake_case ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def _lowercase ( self ) -> bool:
return self.root is None
def _lowercase ( self , _snake_case ) -> None:
_UpperCamelCase : List[Any] = Node(_snake_case ) # create a new Node
if self.empty(): # if Tree is empty
_UpperCamelCase : Optional[Any] = new_node # set its root
else: # Tree is not empty
_UpperCamelCase : int = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
_UpperCamelCase : Union[str, Any] = new_node # We insert the new node in a leaf
break
else:
_UpperCamelCase : Union[str, Any] = parent_node.left
else:
if parent_node.right is None:
_UpperCamelCase : Any = new_node
break
else:
_UpperCamelCase : str = parent_node.right
_UpperCamelCase : Any = parent_node
def _lowercase ( self , *_snake_case ) -> None:
for value in values:
self.__insert(_snake_case )
def _lowercase ( self , _snake_case ) -> Node | None:
if self.empty():
raise IndexError('''Warning: Tree is empty! Please insert values first.''' )
else:
_UpperCamelCase : List[str] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value != value: # compare by equality, not identity
_UpperCamelCase : Optional[Any] = node.left if value < node.value else node.right
return node
def _lowercase ( self , _snake_case = None ) -> Node | None:
if node is None:
if self.root is None:
return None
_UpperCamelCase : Dict = self.root
if not self.empty():
while node.right is not None:
_UpperCamelCase : Tuple = node.right
return node
def _lowercase ( self , _snake_case = None ) -> Node | None:
if node is None:
_UpperCamelCase : Optional[Any] = self.root
if self.root is None:
return None
if not self.empty():
_UpperCamelCase : Optional[int] = self.root
while node.left is not None:
_UpperCamelCase : List[str] = node.left
return node
def _lowercase ( self , _snake_case ) -> None:
_UpperCamelCase : str = self.search(_snake_case ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(_snake_case , _snake_case )
elif node.left is None: # Has only a right child
self.__reassign_nodes(_snake_case , node.right )
elif node.right is None: # Has only a left child
self.__reassign_nodes(_snake_case , node.left )
else:
_UpperCamelCase : List[str] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
_UpperCamelCase : int = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def _lowercase ( self , _snake_case ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def _lowercase ( self , _snake_case=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def _lowercase ( self , _snake_case , _snake_case ) -> None:
if node:
self.inorder(_snake_case , node.left )
arr.append(node.value )
self.inorder(_snake_case , node.right )
def _lowercase ( self , _snake_case , _snake_case ) -> int:
_UpperCamelCase : list[int] = []
self.inorder(_snake_case , _snake_case ) # append all values to list using inorder traversal
return arr[k - 1]
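# Worked example: for the demo tree built below from (8, 3, 6, 1, 10, 14, 13, 4, 7),
# the inorder traversal is 1, 3, 4, 6, 7, 8, 10, 13, 14, so k = 3 yields 4.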
def snake_case__ ( UpperCamelCase ) -> list[Node]:
_UpperCamelCase : int = []
if curr_node is not None:
_UpperCamelCase : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
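# Worked example: for the same demo tree, postorder visits children before each
# parent and returns the nodes valued 1, 4, 7, 6, 3, 13, 14, 10, 8.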
def snake_case__ ( ) -> None:
_UpperCamelCase : Any = (8, 3, 6, 1, 10, 14, 13, 4, 7)
_UpperCamelCase : Tuple = BinarySearchTree()
for i in testlist:
t.insert(UpperCamelCase )
# Prints all the elements of the list in order traversal
print(UpperCamelCase )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' ,t.get_max().value ) # type: ignore
print('''Min Value: ''' ,t.get_min().value ) # type: ignore
for i in testlist:
t.remove(UpperCamelCase )
print(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 683 | 1 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class a ( _UpperCAmelCase ,unittest.TestCase ):
UpperCAmelCase__ : List[Any] = ProphetNetTokenizer
UpperCAmelCase__ : Dict = False
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
super().setUp()
__lowerCamelCase: List[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowerCamelCase: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Tuple ):
__lowerCamelCase: Dict = """UNwant\u00E9d,running"""
__lowerCamelCase: str = """unwanted, running"""
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
__lowerCamelCase: Optional[int] = self.tokenizer_class(self.vocab_file )
__lowerCamelCase: int = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [9, 6, 7, 12, 10, 11] )
def SCREAMING_SNAKE_CASE__ ( self : int ):
__lowerCamelCase: List[str] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
__lowerCamelCase: int = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
__lowerCamelCase: Optional[Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
__lowerCamelCase: str = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
__lowerCamelCase: List[Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
__lowerCamelCase: Optional[int] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
__lowerCamelCase: Optional[Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
__lowerCamelCase: Dict = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
__lowerCamelCase: int = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def SCREAMING_SNAKE_CASE__ ( self : int ):
__lowerCamelCase: Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__lowerCamelCase: Optional[Any] = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase: Any = i
__lowerCamelCase: List[Any] = WordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
__lowerCamelCase: Any = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
__lowerCamelCase: Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__lowerCamelCase: List[str] = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
__lowerCamelCase: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Optional[int] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def SCREAMING_SNAKE_CASE__ ( self : int ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
__lowerCamelCase: Any = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
__lowerCamelCase: Optional[int] = tokenizer.encode("""sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Tuple = tokenizer.encode("""multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[str] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: str = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 189 |
from __future__ import annotations
_A : List[str] = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class a :
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : dict[str, list[str]] , SCREAMING_SNAKE_CASE_ : str ):
__lowerCamelCase: Dict = graph
# mapping node to its parent in resulting breadth first tree
__lowerCamelCase: dict[str, str | None] = {}
__lowerCamelCase: str = source_vertex
def SCREAMING_SNAKE_CASE__ ( self : int ):
__lowerCamelCase: Optional[int] = {self.source_vertex}
__lowerCamelCase: Optional[int] = None
__lowerCamelCase: Optional[int] = [self.source_vertex] # first in first out queue
while queue:
__lowerCamelCase: int = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Dict = vertex
queue.append(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ):
if target_vertex == self.source_vertex:
return self.source_vertex
__lowerCamelCase: List[str] = self.parent.get(SCREAMING_SNAKE_CASE_ )
if target_vertex_parent is None:
__lowerCamelCase: Optional[int] = (
F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
return self.shortest_path(target_vertex_parent ) + F'->{target_vertex}' # recurse on the parent, not the target itself
if __name__ == "__main__":
_A : int = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
try:
    print(g.shortest_path('''Foo'''))
except ValueError as error:
    print(error)  # "Foo" is unreachable, so shortest_path raises
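# Expected output for source "G", derived by hand from the adjacency map above:
#   G->C->A->B->D
#   G
#   No path from vertex: G to vertex: Foo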
| 189 | 1 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase__ ( UpperCAmelCase_ ):
'''simple docstring'''
A_ : Union[str, Any] = (CMStochasticIterativeScheduler,)
A_ : int = 10
def UpperCAmelCase_ ( self , **__snake_case ):
_SCREAMING_SNAKE_CASE : Optional[int] = {
'num_train_timesteps': 201,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
config.update(**__snake_case )
return config
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = 10
_SCREAMING_SNAKE_CASE : str = self.get_scheduler_config()
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0](**__snake_case )
scheduler.set_timesteps(__snake_case )
_SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps[0]
_SCREAMING_SNAKE_CASE : List[Any] = scheduler.timesteps[1]
_SCREAMING_SNAKE_CASE : int = self.dummy_sample
_SCREAMING_SNAKE_CASE : str = 0.1 * sample
_SCREAMING_SNAKE_CASE : Optional[int] = scheduler.step(__snake_case , __snake_case , __snake_case ).prev_sample
_SCREAMING_SNAKE_CASE : Tuple = scheduler.step(__snake_case , __snake_case , __snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase_ ( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__snake_case )
def UpperCAmelCase_ ( self ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=__snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
_SCREAMING_SNAKE_CASE : Any = scheduler_class(**__snake_case )
_SCREAMING_SNAKE_CASE : Optional[int] = 1
scheduler.set_timesteps(__snake_case )
_SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps
_SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_model()
_SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(__snake_case ):
# 1. scale model input
_SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(__snake_case , __snake_case )
# 2. predict noise residual
_SCREAMING_SNAKE_CASE : Optional[int] = model(__snake_case , __snake_case )
# 3. predict previous sample x_t-1
_SCREAMING_SNAKE_CASE : List[Any] = scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case ).prev_sample
_SCREAMING_SNAKE_CASE : str = pred_prev_sample
_SCREAMING_SNAKE_CASE : int = torch.sum(torch.abs(__snake_case ) )
_SCREAMING_SNAKE_CASE : Dict = torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Any = self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config()
_SCREAMING_SNAKE_CASE : Dict = scheduler_class(**__snake_case )
_SCREAMING_SNAKE_CASE : str = [106, 0]
scheduler.set_timesteps(timesteps=__snake_case )
_SCREAMING_SNAKE_CASE : Any = scheduler.timesteps
_SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : str = self.dummy_model()
_SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
_SCREAMING_SNAKE_CASE : List[str] = scheduler.scale_model_input(__snake_case , __snake_case )
# 2. predict noise residual
_SCREAMING_SNAKE_CASE : List[Any] = model(__snake_case , __snake_case )
# 3. predict previous sample x_t-1
_SCREAMING_SNAKE_CASE : Tuple = scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case ).prev_sample
_SCREAMING_SNAKE_CASE : List[Any] = pred_prev_sample
_SCREAMING_SNAKE_CASE : int = torch.sum(torch.abs(__snake_case ) )
_SCREAMING_SNAKE_CASE : List[str] = torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
_SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**__snake_case )
_SCREAMING_SNAKE_CASE : int = [39, 30, 12, 15, 0]
with self.assertRaises(__snake_case , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=__snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
_SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**__snake_case )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [39, 30, 12, 1, 0]
_SCREAMING_SNAKE_CASE : List[Any] = len(__snake_case )
with self.assertRaises(__snake_case , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=__snake_case , timesteps=__snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
_SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**__snake_case )
_SCREAMING_SNAKE_CASE : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__snake_case , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=__snake_case )
| 533 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_ = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""CLIPFeatureExtractor"""]
lowerCAmelCase_ = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 411 | 0 |
"""simple docstring"""
from __future__ import annotations
class _lowerCamelCase :
def __init__( self : Any , lowerCamelCase_ : list[list[int]] ):
"""simple docstring"""
_lowercase : Tuple = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.' )
if len(lowerCamelCase_ ) != 0:
_lowercase : Dict = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(lowerCamelCase_ ) != cols:
raise error
for value in row:
if not isinstance(value , (int, float) ):
raise error
_lowercase : int = rows
else:
_lowercase : Optional[Any] = []
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
return len(self.rows )
@property
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
return len(self.rows[0] )
@property
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
return (self.num_rows, self.num_columns)
@property
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
return self.order[0] == self.order[1]
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_lowercase : Optional[int] = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
return bool(self.determinant() )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
"""simple docstring"""
_lowercase : List[str] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(lowerCamelCase_ ).determinant()
def __UpperCAmelCase ( self : str , lowerCamelCase_ : int , lowerCamelCase_ : int ):
"""simple docstring"""
if (row + column) % 2 == 0:
return self.get_minor(lowerCamelCase_ , lowerCamelCase_ )
return -1 * self.get_minor(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
return Matrix(
[
[self.get_minor(lowerCamelCase_ , lowerCamelCase_ ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_lowercase : str = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : Dict = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse' )
return self.adjugate() * (1 / determinant)
def __repr__( self : List[str] ):
"""simple docstring"""
return str(self.rows )
def __str__( self : str ):
"""simple docstring"""
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(value ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : list[int] , lowerCamelCase_ : int | None = None ):
"""simple docstring"""
_lowercase : Union[str, Any] = TypeError('Row must be a list containing all ints and/or floats' )
if not isinstance(lowerCamelCase_ , list ):
raise type_error
for value in row:
if not isinstance(value , (int, float) ):
raise type_error
if len(lowerCamelCase_ ) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix' )
if position is None:
self.rows.append(lowerCamelCase_ )
else:
_lowercase : Any = self.rows[0:position] + [row] + self.rows[position:]
def __UpperCAmelCase ( self : str , lowerCamelCase_ : list[int] , lowerCamelCase_ : int | None = None ):
"""simple docstring"""
_lowercase : Tuple = TypeError(
'Column must be a list containing all ints and/or floats' )
if not isinstance(lowerCamelCase_ , list ):
raise type_error
for value in column:
if not isinstance(value , (int, float) ):
raise type_error
if len(lowerCamelCase_ ) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix' )
if position is None:
_lowercase : List[Any] = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
_lowercase : Any = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : Union[str, Any] , lowerCamelCase_ : object ):
"""simple docstring"""
if not isinstance(lowerCamelCase_ , Matrix ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Any , lowerCamelCase_ : object ):
"""simple docstring"""
return not self == other
def __neg__( self : int ):
"""simple docstring"""
return self * -1
def __add__( self : Optional[int] , lowerCamelCase_ : Matrix ):
"""simple docstring"""
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : str , lowerCamelCase_ : Matrix ):
"""simple docstring"""
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : Optional[Any] , lowerCamelCase_ : Matrix | int | float ):
"""simple docstring"""
if isinstance(lowerCamelCase_ , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(lowerCamelCase_ , Matrix ):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second' )
return Matrix(
[
[Matrix.dot_product(lowerCamelCase_ , lowerCamelCase_ ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix' )
def __pow__( self : Optional[Any] , lowerCamelCase_ : int ):
"""simple docstring"""
if not isinstance(lowerCamelCase_ , int ):
raise TypeError('A Matrix can only be raised to the power of an int' )
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power' )
_lowercase : str = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def __UpperCAmelCase ( cls : str , lowerCamelCase_ : list[int] , lowerCamelCase_ : list[int] ):
"""simple docstring"""
return sum(row[i] * column[i] for i in range(len(lowerCamelCase_ ) ) )
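# A small usage sketch of the class above (values computed by hand):
#   m = Matrix([[1, 2], [3, 4]])
#   m.order            # (2, 2)
#   m.determinant()    # 1 * 4 - 2 * 3 = -2
#   m.get_minor(0, 0)  # determinant of [[4]] -> 4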
if __name__ == "__main__":
import doctest
doctest.testmod()
| 283 | """simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def __lowerCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
t = int(__UpperCAmelCase )
h , m , s = t // 3_600, (t // 60) % 60, t % 60
return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
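# e.g. format_time(3661) -> "1:01:01" and format_time(75) -> "01:15"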
def __lowerCAmelCase( __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=300 ):
"""simple docstring"""
return F'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def __lowerCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
_lowercase : List[Any] = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
_lowercase : Optional[Any] = F'''{elt:.6f}''' if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else str(__UpperCAmelCase )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
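# e.g. text_to_html_table([["Step", "Loss"], [1, 0.25]]) renders a table with
# headers "Step"/"Loss" and one body row: "1" and "0.250000" (floats get 6 decimals).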
class _lowerCamelCase :
_snake_case = 5
_snake_case = 0.2
def __init__( self : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional["NotebookTrainingTracker"] = None , lowerCamelCase_ : int = 3_0_0 , ):
"""simple docstring"""
_lowercase : int = total
_lowercase : Dict = '' if prefix is None else prefix
_lowercase : List[Any] = leave
_lowercase : Dict = parent
_lowercase : str = width
_lowercase : Any = None
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = None
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : bool = False , lowerCamelCase_ : str = None ):
"""simple docstring"""
_lowercase : Dict = value
if comment is not None:
_lowercase : List[str] = comment
if self.last_value is None:
_lowercase : List[Any] = time.time()
_lowercase : str = value
_lowercase : Any = None
_lowercase : List[Any] = self.warmup
_lowercase : Union[str, Any] = 1
self.update_bar(lowerCamelCase_ )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
_lowercase : Tuple = time.time()
_lowercase : List[str] = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
_lowercase : List[Any] = self.elapsed_time / (value - self.start_value)
else:
_lowercase : List[Any] = None
if value >= self.total:
_lowercase : Optional[Any] = self.total
_lowercase : str = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_lowercase : int = self.average_time_per_item * (self.total - value)
self.update_bar(lowerCamelCase_ )
_lowercase : Optional[int] = value
_lowercase : Dict = current_time
if self.average_time_per_item is None:
_lowercase : Union[str, Any] = 1
else:
_lowercase : Optional[Any] = max(int(self.update_every / self.average_time_per_item ) , 1 )
def __UpperCAmelCase ( self : str , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any]=None ):
"""simple docstring"""
_lowercase : str = ' ' * (len(str(self.total ) ) - len(str(lowerCamelCase_ ) )) + str(lowerCamelCase_ )
if self.elapsed_time is None:
_lowercase : Dict = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
_lowercase : Optional[int] = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
_lowercase : int = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_lowercase : int = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
_lowercase : List[str] = disp.display(disp.HTML(self.html_code ) , display_id=lowerCamelCase_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('' ) )
class _lowerCamelCase (__lowerCamelCase ):
def __init__( self : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple=None ):
"""simple docstring"""
super().__init__(lowerCamelCase_ )
_lowercase : Tuple = None if column_names is None else [column_names]
_lowercase : Optional[int] = None
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_lowercase : Union[str, Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_lowercase : Dict = disp.display(disp.HTML(self.html_code ) , display_id=lowerCamelCase_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
if self.inner_table is None:
_lowercase : Optional[int] = [list(values.keys() ), list(values.values() )]
else:
_lowercase : str = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowerCamelCase_ )
_lowercase : List[Any] = columns
self.inner_table.append([values[c] for c in columns] )
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : int=3_0_0 ):
"""simple docstring"""
_lowercase : List[str] = NotebookProgressBar(lowerCamelCase_ , prefix=lowerCamelCase_ , parent=self , width=lowerCamelCase_ )
return self.child_bar
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_lowercase : Any = None
self.display()
class _lowerCamelCase (__lowerCamelCase ):
def __init__( self : Optional[Any] ):
"""simple docstring"""
_lowercase : int = None
_lowercase : Any = None
_lowercase : Optional[int] = False
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] , **lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
_lowercase : Any = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
_lowercase : Dict = 0
_lowercase : Optional[Any] = 0
_lowercase : Union[str, Any] = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss' )
_lowercase : Tuple = NotebookTrainingTracker(state.max_steps , lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Dict ):
"""simple docstring"""
_lowercase : Optional[Any] = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
_lowercase : Tuple = False
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any]=None , **lowerCamelCase_ : str ):
"""simple docstring"""
if not has_length(lowerCamelCase_ ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_lowercase : Tuple = self.training_tracker.add_child(len(lowerCamelCase_ ) )
else:
_lowercase : Optional[Any] = NotebookProgressBar(len(lowerCamelCase_ ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , **lowerCamelCase_ : List[str] ):
"""simple docstring"""
if self.prediction_bar is not None:
self.prediction_bar.close()
_lowercase : Union[str, Any] = None
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : int=None , **lowerCamelCase_ : List[Any] ):
"""simple docstring"""
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_lowercase : Any = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
_lowercase : Any = state.global_step
self.training_tracker.write_line(lowerCamelCase_ )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[str] , lowerCamelCase_ : str=None , **lowerCamelCase_ : List[Any] ):
"""simple docstring"""
if self.training_tracker is not None:
_lowercase : List[str] = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history ):
if "loss" in log:
_lowercase : Union[str, Any] = log['loss']
break
if self.first_column == "Epoch":
_lowercase : Optional[int] = int(state.epoch )
else:
_lowercase : Optional[Any] = state.global_step
_lowercase : int = 'eval'
for k in metrics:
if k.endswith('_loss' ):
_lowercase : Tuple = re.sub(r'\_loss$' , '' , lowerCamelCase_ )
_lowercase : Optional[Any] = metrics.pop('total_flos' , lowerCamelCase_ )
_lowercase : List[str] = metrics.pop('epoch' , lowerCamelCase_ )
_lowercase : List[Any] = metrics.pop(F'''{metric_key_prefix}_runtime''' , lowerCamelCase_ )
_lowercase : Tuple = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , lowerCamelCase_ )
_lowercase : Any = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , lowerCamelCase_ )
_lowercase : Union[str, Any] = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , lowerCamelCase_ )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
_lowercase : Optional[Any] = v
else:
_lowercase : Tuple = k.split('_' )
_lowercase : List[Any] = ' '.join([part.capitalize() for part in splits[1:]] )
_lowercase : List[Any] = v
self.training_tracker.write_line(lowerCamelCase_ )
self.training_tracker.remove_child()
_lowercase : Any = None
# Evaluation takes a long time so we should force the next update.
_lowercase : Dict = True
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Any ):
"""simple docstring"""
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=lowerCamelCase_ )
_lowercase : List[Any] = None
| 283 | 1 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    '''simple docstring'''
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
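# Note: bitonic sort assumes the input length is a power of two. A minimal
# direct-call sketch (values are illustrative, not from the interactive demo):
#
#   data = [12, 42, -21, 17, 23, 18, 9, -5]
#   bitonic_sort(data, 0, len(data), 1)
#   assert data == sorted(data)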
| 80 |
def solution(n: int = 2_000_000) -> int:
    '''simple docstring'''
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F'''{solution() = }''')
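# A quick sanity check on a small input (2 + 3 + 5 + 7 = 17):
#
#   assert solution(10) == 17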
| 80 | 1 |
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split()
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rf"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
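# Example invocation (the script filename and directory names are
# illustrative); prints the modified .py files under the given top-level
# dirs since the fork point from main:
#
#   python get_modified_files.py utils src tests examples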
| 719 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__UpperCAmelCase = logging.get_logger(__name__)
class _a ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ['pixel_values']
def __init__( self ,__SCREAMING_SNAKE_CASE = True ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR ,__SCREAMING_SNAKE_CASE = True ,__SCREAMING_SNAKE_CASE = 1 / 255 ,__SCREAMING_SNAKE_CASE = True ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = True ,**__SCREAMING_SNAKE_CASE ,):
super().__init__(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Tuple = size if size is not None else {'shortest_edge': 224}
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Any = crop_size if crop_size is not None else {'height': 256, 'width': 256}
SCREAMING_SNAKE_CASE : str = get_size_dict(__SCREAMING_SNAKE_CASE ,param_name='crop_size' )
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : Dict = size
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : List[str] = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[Any] = do_center_crop
SCREAMING_SNAKE_CASE : Any = crop_size
SCREAMING_SNAKE_CASE : List[str] = do_flip_channel_order
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = PIL.Image.BILINEAR ,__SCREAMING_SNAKE_CASE = None ,**__SCREAMING_SNAKE_CASE ,):
SCREAMING_SNAKE_CASE : int = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : str = get_resize_output_image_size(__SCREAMING_SNAKE_CASE ,size=size['shortest_edge'] ,default_to_square=__SCREAMING_SNAKE_CASE )
return resize(__SCREAMING_SNAKE_CASE ,size=__SCREAMING_SNAKE_CASE ,resample=__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ,**__SCREAMING_SNAKE_CASE ,):
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__SCREAMING_SNAKE_CASE ,size=(size['height'], size['width']) ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ,**__SCREAMING_SNAKE_CASE ,):
return rescale(__SCREAMING_SNAKE_CASE ,scale=__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ):
return flip_channel_order(__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,**__SCREAMING_SNAKE_CASE ,):
SCREAMING_SNAKE_CASE : Tuple = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Dict = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : int = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : Union[str, Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Dict = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : Tuple = get_size_dict(__SCREAMING_SNAKE_CASE ,param_name='crop_size' )
SCREAMING_SNAKE_CASE : Union[str, Any] = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Optional[Any] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Tuple = [self.resize(image=__SCREAMING_SNAKE_CASE ,size=__SCREAMING_SNAKE_CASE ,resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE : List[str] = [self.center_crop(image=__SCREAMING_SNAKE_CASE ,size=__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Dict = [self.rescale(image=__SCREAMING_SNAKE_CASE ,scale=__SCREAMING_SNAKE_CASE ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
SCREAMING_SNAKE_CASE : Optional[int] = [self.flip_channel_order(image=__SCREAMING_SNAKE_CASE ) for image in images]
SCREAMING_SNAKE_CASE : Dict = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = {'pixel_values': images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE ,tensor_type=__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ):
SCREAMING_SNAKE_CASE : Tuple = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : Tuple = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[Any] = []
for idx in range(len(__SCREAMING_SNAKE_CASE ) ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
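# A minimal usage sketch (this corresponds to the upstream
# `MobileViTImageProcessor`, an assumption based on the flip-channel-order
# behaviour; values are illustrative):
#
#   from transformers import MobileViTImageProcessor
#   from PIL import Image
#   import numpy as np
#   processor = MobileViTImageProcessor()
#   image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
#   batch = processor(images=image, return_tensors="np")
#   print(batch["pixel_values"].shape)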
| 220 | 0 |
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open('w').write('\n'.join(new))
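# Example invocation via fire (script filename and paths are placeholders):
# positional CLI args map onto minify(src_dir, dest_dir, n):
#
#   python minify_dataset.py ./raw_data ./mini_data 100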
if __name__ == "__main__":
    fire.Fire(minify)
| 407 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))
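# A quick sketch of the expected behaviour (values are approximate):
#
#   sigmoid(np.array([0.0]))        # -> array([0.5])
#   sigmoid(np.array([-1.0, 1.0]))  # -> array([0.26894142, 0.73105858])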
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 407 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _lowercase ( ProcessorMixin ):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"
def __init__( self: List[str] , UpperCamelCase__: str , UpperCamelCase__: str ):
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self: Tuple , *UpperCamelCase__: Dict , **UpperCamelCase__: List[Any] ):
lowerCamelCase__ : str = kwargs.pop("""audio""" , UpperCamelCase__ )
lowerCamelCase__ : Any = kwargs.pop("""text""" , UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = kwargs.pop("""text_target""" , UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = kwargs.pop("""audio_target""" , UpperCamelCase__ )
lowerCamelCase__ : int = kwargs.pop("""sampling_rate""" , UpperCamelCase__ )
if audio is not None and text is not None:
raise ValueError(
"""Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
if audio_target is not None and text_target is not None:
raise ValueError(
"""Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"""You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )
if audio is not None:
lowerCamelCase__ : Tuple = self.feature_extractor(UpperCamelCase__ , *UpperCamelCase__ , sampling_rate=UpperCamelCase__ , **UpperCamelCase__ )
elif text is not None:
lowerCamelCase__ : Any = self.tokenizer(UpperCamelCase__ , **UpperCamelCase__ )
else:
lowerCamelCase__ : List[Any] = None
if audio_target is not None:
lowerCamelCase__ : Optional[Any] = self.feature_extractor(audio_target=UpperCamelCase__ , *UpperCamelCase__ , sampling_rate=UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = targets["""input_values"""]
elif text_target is not None:
lowerCamelCase__ : Dict = self.tokenizer(UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase__ : List[Any] = targets["""input_ids"""]
else:
lowerCamelCase__ : Optional[Any] = None
if inputs is None:
return targets
if targets is not None:
lowerCamelCase__ : str = labels
lowerCamelCase__ : Any = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
lowerCamelCase__ : int = decoder_attention_mask
return inputs
def lowerCamelCase_ ( self: List[Any] , *UpperCamelCase__: Dict , **UpperCamelCase__: int ):
lowerCamelCase__ : Union[str, Any] = kwargs.pop("""input_values""" , UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = kwargs.pop("""input_ids""" , UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = kwargs.pop("""labels""" , UpperCamelCase__ )
if input_values is not None and input_ids is not None:
raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"""You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" )
if input_values is not None:
lowerCamelCase__ : int = self.feature_extractor.pad(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
elif input_ids is not None:
lowerCamelCase__ : Union[str, Any] = self.tokenizer.pad(UpperCamelCase__ , **UpperCamelCase__ )
else:
lowerCamelCase__ : List[Any] = None
if labels is not None:
if "input_ids" in labels or (isinstance(UpperCamelCase__ , UpperCamelCase__ ) and "input_ids" in labels[0]):
lowerCamelCase__ : Dict = self.tokenizer.pad(UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = targets["""input_ids"""]
else:
lowerCamelCase__ : int = self.feature_extractor.feature_size
lowerCamelCase__ : Union[str, Any] = self.feature_extractor.num_mel_bins
lowerCamelCase__ : Optional[Any] = self.feature_extractor.pad(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase__ : List[Any] = feature_size_hack
lowerCamelCase__ : List[str] = targets["""input_values"""]
else:
lowerCamelCase__ : List[str] = None
if inputs is None:
return targets
if targets is not None:
lowerCamelCase__ : str = labels
lowerCamelCase__ : Optional[Any] = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
lowerCamelCase__ : List[Any] = decoder_attention_mask
return inputs
def lowerCamelCase_ ( self: List[Any] , *UpperCamelCase__: Tuple , **UpperCamelCase__: Tuple ):
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCamelCase_ ( self: str , *UpperCamelCase__: int , **UpperCamelCase__: Tuple ):
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
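# A minimal usage sketch of the call pattern above (assuming the upstream
# `SpeechT5Processor`; the checkpoint name is illustrative):
#
#   from transformers import SpeechT5Processor
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello world", return_tensors="pt")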
| 631 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}
    def __init__(self, vocab_size=50_277, context_length=1_024, hidden_size=4_096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
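# A minimal usage sketch (hyper-parameter values are illustrative):
#
#   config = RwkvConfig(vocab_size=50_277, context_length=1_024, hidden_size=2_048)
#   print(config.attention_hidden_size)  # falls back to hidden_size when not set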
| 631 | 1 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    for param in module.parameters():
        param.requires_grad = False
def get_device():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = 'mps'
    if device == 'mps':
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()
def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime('%H:%M:%S')
    return timestamp
| 494 |
'''simple docstring'''
def solution() -> int:
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1_901
    sundays = 0
    while year < 2_001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2_001 and day == 1:
            sundays += 1
    return sundays
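# The expected count of first-of-month Sundays during the twentieth century
# is 171 (Project Euler problem 19), so a quick check is:
#
#   assert solution() == 171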
if __name__ == "__main__":
    print(solution())
| 494 | 1 |
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    '''simple docstring'''
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)
def insert_next(collection: list, index: int):
    '''simple docstring'''
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
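# A minimal direct-call sketch (values are illustrative); the sort is in-place:
#
#   data = [5, 3, 1, 4, 2]
#   rec_insertion_sort(data, len(data))
#   assert data == [1, 2, 3, 4, 5]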
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 711 |
'''simple docstring'''
Point3d = tuple[float, float, float]
Vector3d = tuple[float, float, float]
def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    '''simple docstring'''
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)
def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    '''simple docstring'''
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    '''simple docstring'''
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    '''simple docstring'''
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
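# A minimal usage sketch (points are illustrative): three points on the line
# x = y = z are collinear, while the standard basis points are not.
#
#   assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
#   assert not are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))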
| 377 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__lowerCamelCase : int = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
__lowerCamelCase : Optional[Any] = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
__lowerCamelCase : str = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
__lowerCamelCase : Optional[int] = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
__lowerCamelCase : Dict = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
__lowerCamelCase : Any = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
__lowerCamelCase : Any = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
__lowerCamelCase : Dict = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
__lowerCamelCase : Optional[int] = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class __magic_name__ ( A__ ):
lowercase : Optional[int] =VOCAB_FILES_NAMES
lowercase : Tuple =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Optional[Any] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase : Tuple =DPRContextEncoderTokenizer
class __magic_name__ ( A__ ):
lowercase : List[Any] =VOCAB_FILES_NAMES
lowercase : int =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Dict =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase : Union[str, Any] =DPRQuestionEncoderTokenizer
__lowerCamelCase : Optional[Any] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
__lowerCamelCase : List[Any] = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__lowerCamelCase : Any = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(A__ )
class __magic_name__ :
def __call__( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Union[bool, str] = False , UpperCamelCase__ : Union[bool, str] = False , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[bool] = None , **UpperCamelCase__ : List[Any] , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
elif titles is None or texts is None:
UpperCAmelCase = titles if texts is None else texts
return super().__call__(
UpperCamelCase__ , UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
UpperCAmelCase = titles if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [titles]
UpperCAmelCase = texts if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [texts]
UpperCAmelCase = len(UpperCamelCase__ )
UpperCAmelCase = questions if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [questions] * n_passages
assert len(UpperCamelCase__ ) == len(
            UpperCamelCase__ ), F'There should be as many titles as texts but got {len(UpperCamelCase__ )} titles and {len(UpperCamelCase__ )} texts.'
UpperCAmelCase = super().__call__(UpperCamelCase__ , UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )["input_ids"]
UpperCAmelCase = super().__call__(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )["input_ids"]
UpperCAmelCase = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(UpperCamelCase__ , UpperCamelCase__ )
]
}
if return_attention_mask is not False:
UpperCAmelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
UpperCAmelCase = attention_mask
return self.pad(UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCamelCase__ : BatchEncoding , UpperCamelCase__ : DPRReaderOutput , UpperCamelCase__ : int = 16 , UpperCamelCase__ : int = 64 , UpperCamelCase__ : int = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
UpperCAmelCase = reader_input["input_ids"]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = reader_output[:3]
UpperCAmelCase = len(UpperCamelCase__ )
UpperCAmelCase = sorted(range(UpperCamelCase__ ) , reverse=UpperCamelCase__ , key=relevance_logits.__getitem__ )
UpperCAmelCase = []
for doc_id in sorted_docs:
UpperCAmelCase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
UpperCAmelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
UpperCAmelCase = sequence_ids.index(self.pad_token_id )
else:
UpperCAmelCase = len(UpperCamelCase__ )
UpperCAmelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCamelCase__ , top_spans=UpperCamelCase__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCamelCase__ , start_index=UpperCamelCase__ , end_index=UpperCamelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(UpperCamelCase__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCamelCase__ : List[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
UpperCAmelCase = []
for start_index, start_score in enumerate(UpperCamelCase__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
UpperCAmelCase = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[1] , reverse=UpperCamelCase__ )
UpperCAmelCase = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F'Wrong span indices: [{start_index}:{end_index}]'
UpperCAmelCase = end_index - start_index + 1
assert length <= max_answer_length, F'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(UpperCamelCase__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(A__ )
class __magic_name__ ( A__, A__ ):
lowercase : Dict =VOCAB_FILES_NAMES
lowercase : Optional[Any] =READER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Any =READER_PRETRAINED_INIT_CONFIGURATION
lowercase : Any =['''input_ids''', '''attention_mask''']
lowercase : Dict =DPRReaderTokenizer
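# A minimal usage sketch of the reader call pattern above (assuming the
# upstream `DPRReaderTokenizerFast`; checkpoint and strings are illustrative):
#
#   from transformers import DPRReaderTokenizerFast
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(questions="What is love?", titles="Haddaway", texts="'What Is Love' is a song...")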
| 323 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rF'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 323 | 1 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args, output_path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    """simple docstring"""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """simple docstring"""
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA')
    else:
        device = 'cpu'
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)
# TEXT ENCODER
SCREAMING_SNAKE_CASE =pipeline.text_encoder.config.max_position_embeddings
SCREAMING_SNAKE_CASE =pipeline.text_encoder.config.hidden_size
SCREAMING_SNAKE_CASE =pipeline.tokenizer(
'A sample prompt', padding='max_length', max_length=pipeline.tokenizer.model_max_length, truncation=lowerCAmelCase_, return_tensors='pt', )
onnx_export(
pipeline.text_encoder, model_args=(text_input.input_ids.to(device=lowerCAmelCase_, dtype=torch.intaa )), output_path=output_path / 'text_encoder' / 'model.onnx', ordered_input_names=['input_ids'], output_names=['last_hidden_state', 'pooler_output'], dynamic_axes={
'input_ids': {0: 'batch', 1: 'sequence'},
}, opset=lowerCAmelCase_, )
del pipeline.text_encoder
# UNET
SCREAMING_SNAKE_CASE =pipeline.unet.config.in_channels
SCREAMING_SNAKE_CASE =pipeline.unet.config.sample_size
SCREAMING_SNAKE_CASE =output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet, model_args=(
torch.randn(2, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ).to(device=lowerCAmelCase_, dtype=lowerCAmelCase_ ),
torch.randn(2 ).to(device=lowerCAmelCase_, dtype=lowerCAmelCase_ ),
torch.randn(2, lowerCAmelCase_, lowerCAmelCase_ ).to(device=lowerCAmelCase_, dtype=lowerCAmelCase_ ),
False,
), output_path=lowerCAmelCase_, ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'], output_names=['out_sample'], dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'timestep': {0: 'batch'},
'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
}, opset=lowerCAmelCase_, use_external_data_format=lowerCAmelCase_, )
SCREAMING_SNAKE_CASE =str(unet_path.absolute().as_posix() )
SCREAMING_SNAKE_CASE =os.path.dirname(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =onnx.load(lowerCAmelCase_ )
# clean up existing tensor files
shutil.rmtree(lowerCAmelCase_ )
os.mkdir(lowerCAmelCase_ )
# collate external tensor files into one
onnx.save_model(
lowerCAmelCase_, lowerCAmelCase_, save_as_external_data=lowerCAmelCase_, all_tensors_to_one_file=lowerCAmelCase_, location='weights.pb', convert_attribute=lowerCAmelCase_, )
del pipeline.unet
# VAE ENCODER
SCREAMING_SNAKE_CASE =pipeline.vae
SCREAMING_SNAKE_CASE =vae_encoder.config.in_channels
SCREAMING_SNAKE_CASE =vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
SCREAMING_SNAKE_CASE =lambda lowerCAmelCase_, lowerCAmelCase_ : vae_encoder.encode(lowerCAmelCase_, lowerCAmelCase_ )[0].sample()
onnx_export(
lowerCAmelCase_, model_args=(
torch.randn(1, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ).to(device=lowerCAmelCase_, dtype=lowerCAmelCase_ ),
False,
), output_path=output_path / 'vae_encoder' / 'model.onnx', ordered_input_names=['sample', 'return_dict'], output_names=['latent_sample'], dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
}, opset=lowerCAmelCase_, )
# VAE DECODER
SCREAMING_SNAKE_CASE =pipeline.vae
SCREAMING_SNAKE_CASE =vae_decoder.config.latent_channels
SCREAMING_SNAKE_CASE =vae_decoder.config.out_channels
# forward only through the decoder part
SCREAMING_SNAKE_CASE =vae_encoder.decode
onnx_export(
lowerCAmelCase_, model_args=(
torch.randn(1, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ).to(device=lowerCAmelCase_, dtype=lowerCAmelCase_ ),
False,
), output_path=output_path / 'vae_decoder' / 'model.onnx', ordered_input_names=['latent_sample', 'return_dict'], output_names=['sample'], dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
}, opset=lowerCAmelCase_, )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
SCREAMING_SNAKE_CASE =pipeline.safety_checker
SCREAMING_SNAKE_CASE =safety_checker.config.vision_config.num_channels
SCREAMING_SNAKE_CASE =safety_checker.config.vision_config.image_size
SCREAMING_SNAKE_CASE =safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker, model_args=(
torch.randn(
1, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, ).to(device=lowerCAmelCase_, dtype=lowerCAmelCase_ ),
torch.randn(1, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ).to(device=lowerCAmelCase_, dtype=lowerCAmelCase_ ),
), output_path=output_path / 'safety_checker' / 'model.onnx', ordered_input_names=['clip_input', 'images'], output_names=['out_images', 'has_nsfw_concepts'], dynamic_axes={
'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
}, opset=lowerCAmelCase_, )
del pipeline.safety_checker
SCREAMING_SNAKE_CASE =OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
SCREAMING_SNAKE_CASE =pipeline.feature_extractor
else:
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ), vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ), text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ), scheduler=pipeline.scheduler, safety_checker=lowerCAmelCase_, feature_extractor=lowerCAmelCase_, requires_safety_checker=safety_checker is not None, )
onnx_pipeline.save_pretrained(lowerCAmelCase_ )
print('ONNX pipeline saved to', lowerCAmelCase_ )
del pipeline
del onnx_pipeline
SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(lowerCAmelCase_, provider='CPUExecutionProvider' )
print('ONNX pipeline is loadable' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
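# Example invocation (script filename, checkpoint, and paths are placeholders):
#
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./stable_diffusion_onnx \
#       --opset 14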
| 705 |
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """simple docstring"""
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('Please enter a valid equation.')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.')
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)')
        else:
            raise ValueError('No solution. (Inconsistent system)')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
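# Worked checks (systems are illustrative; the second solves to x = 4, y = -7):
#
#   assert cramers_rule_2x2([2, 3, 0], [5, 1, 0]) == (0.0, 0.0)
#   assert cramers_rule_2x2([11, 2, 30], [1, 0, 4]) == (4.0, -7.0)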
| 252 | 0 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _a ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (('num_inference_steps', 5_0),)
    def get_scheduler_config(self, **UpperCAmelCase):
        config = {"num_train_timesteps": 1000}
config.update(**UpperCAmelCase )
return config
def __A ( self : List[str] , UpperCAmelCase : Dict=0 , **UpperCAmelCase : List[Any] ):
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("num_inference_steps" , UpperCAmelCase )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**UpperCAmelCase )
A_ = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
A_ = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
A_ = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
A_ = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __A ( self : Tuple ):
pass
def __A ( self : Optional[int] , UpperCAmelCase : List[Any]=0 , **UpperCAmelCase : str ):
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("num_inference_steps" , UpperCAmelCase )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
A_ = scheduler_class.from_pretrained(UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
A_ = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
A_ = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self, **UpperCAmelCase):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**UpperCAmelCase)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
def __A ( self : int ):
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("num_inference_steps" , UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase , "set_timesteps" ):
scheduler.set_timesteps(UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase , "set_timesteps" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __A ( self : Any ):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase , time_step=UpperCAmelCase )
def __A ( self : Tuple ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCAmelCase , time_step=UpperCAmelCase )
    def test_full_loop(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
| 86 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm'] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xglm'] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xglm'] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xglm'] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 86 | 1 |
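
# Illustrative usage sketch (not part of the XGLM module above): with the
# `_LazyModule` pattern, the heavy torch/tf/flax submodules are only imported when
# an attribute is first accessed, so the cheap config import below never pulls in a
# deep-learning backend.
from transformers import XGLMConfig  # resolves configuration_xglm on first access

config = XGLMConfig()
print(config.model_type)  # "xglm"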
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = len(_SCREAMING_SNAKE_CASE )
snake_case_ = len(matrix[0] )
snake_case_ = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for row in range(_SCREAMING_SNAKE_CASE ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , _SCREAMING_SNAKE_CASE ):
snake_case_ = matrix[col][row] / matrix[row][row]
for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
snake_case_ = True
for i in range(row + 1 , _SCREAMING_SNAKE_CASE ):
if matrix[i][row] != 0:
snake_case_ , snake_case_ = matrix[i], matrix[row]
snake_case_ = False
break
if reduce:
rank -= 1
for i in range(_SCREAMING_SNAKE_CASE ):
snake_case_ = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
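
# Example usage (values checked by hand): in the first matrix the second row is a
# multiple of the first, so the rank is 1; the 2x2 identity matrix has full rank.
assert rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]) == 1
assert rank_of_matrix([[1.0, 0.0], [0.0, 1.0]]) == 2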
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
    'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_biogpt'] = [
        'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BioGptForCausalLM',
        'BioGptForTokenClassification',
        'BioGptForSequenceClassification',
        'BioGptModel',
        'BioGptPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
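
# Illustrative usage sketch (not part of the module above); "microsoft/biogpt" is
# the public checkpoint and the calls follow the standard transformers API.
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
inputs = tokenizer("COVID-19 is", return_tensors="pt")
generated = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(generated[0], skip_special_tokens=True))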
| 2 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_blip_2': [
        'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Blip2Config',
        'Blip2QFormerConfig',
        'Blip2VisionConfig',
    ],
    'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip_2'] = [
        'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Blip2Model',
        'Blip2QFormerModel',
        'Blip2PreTrainedModel',
        'Blip2ForConditionalGeneration',
        'Blip2VisionModel',
    ]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
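
# Illustrative usage sketch (not part of the module above); "Salesforce/blip2-opt-2.7b"
# is a public checkpoint and the captioning flow follows the documented processor API.
from PIL import Image
from transformers import Blip2ForConditionalGeneration, Blip2Processor

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")
image = Image.new("RGB", (224, 224))  # placeholder image for the sketch
inputs = processor(images=image, return_tensors="pt")
caption_ids = model.generate(**inputs, max_new_tokens=30)
print(processor.batch_decode(caption_ids, skip_special_tokens=True)[0])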
| 582 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = 'resnet'
    layer_types = ['basic', 'bottleneck']

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type='bottleneck',
        hidden_act='relu',
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
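
# Quick sketch (illustrative): build a small basic-block variant of the config above
# and inspect the ONNX input mapping; the depth/width values are assumptions, chosen
# to roughly match ResNet-18.
config = ResNetConfig(layer_type='basic', depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512])
onnx_config = ResNetOnnxConfig(config)
print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch', ...})])
print(onnx_config.atol_for_validation)  # 0.001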
| 582 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 3 , UpperCamelCase__ = 1 , UpperCamelCase__ = 1 , UpperCamelCase__ = "relu" , ):
super().__init__()
SCREAMING_SNAKE_CASE_ : List[Any] = nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
SCREAMING_SNAKE_CASE_ : str = nn.BatchNormad(lowercase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def __snake_case ( self , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Dict = self.convolution(lowercase_ )
SCREAMING_SNAKE_CASE_ : str = self.normalization(lowercase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class RegNetEmbeddings(nn.Module):
'''simple docstring'''
def __init__( self , UpperCamelCase__ ):
super().__init__()
SCREAMING_SNAKE_CASE_ : str = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
SCREAMING_SNAKE_CASE_ : Any = config.num_channels
def __snake_case ( self , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : List[str] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
SCREAMING_SNAKE_CASE_ : Any = self.embedder(lowercase_ )
return hidden_state
class RegNetShortCut(nn.Module):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 2 ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.BatchNormad(lowercase_ )
def __snake_case ( self , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Tuple = self.convolution(lowercase_ )
SCREAMING_SNAKE_CASE_ : str = self.normalization(lowercase_ )
return hidden_state
class RegNetSELayer(nn.Module):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ):
super().__init__()
SCREAMING_SNAKE_CASE_ : int = nn.AdaptiveAvgPoolad((1, 1) )
SCREAMING_SNAKE_CASE_ : int = nn.Sequential(
nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , )
def __snake_case ( self , UpperCamelCase__ ):
# b c h w -> b c 1 1
SCREAMING_SNAKE_CASE_ : List[str] = self.pooler(lowercase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.attention(lowercase_ )
SCREAMING_SNAKE_CASE_ : Any = hidden_state * attention
return hidden_state
class RegNetXLayer(nn.Module):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 1 ):
super().__init__()
SCREAMING_SNAKE_CASE_ : List[Any] = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE_ : Optional[int] = max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE_ : Dict = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
SCREAMING_SNAKE_CASE_ : List[Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
SCREAMING_SNAKE_CASE_ : int = ACTaFN[config.hidden_act]
def __snake_case ( self , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Any = hidden_state
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.layer(lowercase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.shortcut(lowercase_ )
hidden_state += residual
SCREAMING_SNAKE_CASE_ : str = self.activation(lowercase_ )
return hidden_state
class RegNetYLayer(nn.Module):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 1 ):
super().__init__()
SCREAMING_SNAKE_CASE_ : str = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE_ : int = max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE_ : int = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
SCREAMING_SNAKE_CASE_ : Optional[int] = ACTaFN[config.hidden_act]
def __snake_case ( self , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_state
SCREAMING_SNAKE_CASE_ : str = self.layer(lowercase_ )
SCREAMING_SNAKE_CASE_ : int = self.shortcut(lowercase_ )
hidden_state += residual
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class RegNetStage(nn.Module):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 2 , UpperCamelCase__ = 2 , ):
super().__init__()
SCREAMING_SNAKE_CASE_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
SCREAMING_SNAKE_CASE_ : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )
def __snake_case ( self , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Tuple = self.layers(lowercase_ )
return hidden_state
class RegNetEncoder(nn.Module):
'''simple docstring'''
def __init__( self , UpperCamelCase__ ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = True ):
SCREAMING_SNAKE_CASE_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE_ : Dict = stage_module(lowercase_ )
if output_hidden_states:
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class RegNetPreTrainedModel(PreTrainedModel):
    '''
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    '''

    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
def __snake_case ( self , UpperCamelCase__ ):
if isinstance(lowercase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=False ):
if isinstance(lowercase_ , lowercase_ ):
SCREAMING_SNAKE_CASE_ : List[str] = value
A = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
A = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , _UpperCAmelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __a ( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ ):
super().__init__(lowercase_ )
SCREAMING_SNAKE_CASE_ : List[str] = config
SCREAMING_SNAKE_CASE_ : Union[str, Any] = RegNetEmbeddings(lowercase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = RegNetEncoder(lowercase_ )
SCREAMING_SNAKE_CASE_ : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE, )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None ):
SCREAMING_SNAKE_CASE_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : str = self.embedder(lowercase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = encoder_outputs[0]
SCREAMING_SNAKE_CASE_ : str = self.pooler(lowercase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
'''simple docstring'''
def __init__( self , UpperCamelCase__ ):
super().__init__(lowercase_ )
SCREAMING_SNAKE_CASE_ : Any = config.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = RegNetModel(lowercase_ )
# classification head
SCREAMING_SNAKE_CASE_ : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
def __snake_case ( self , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : Optional[int] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE_ : List[Any] = self.classifier(lowercase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_ : Optional[int] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE_ : str = """single_label_classification"""
else:
SCREAMING_SNAKE_CASE_ : str = """multi_label_classification"""
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE_ : str = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
SCREAMING_SNAKE_CASE_ : List[str] = loss_fct(lowercase_ , lowercase_ )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE_ : Optional[int] = CrossEntropyLoss()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE_ : Dict = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE_ : Tuple = loss_fct(lowercase_ , lowercase_ )
if not return_dict:
SCREAMING_SNAKE_CASE_ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states ) | 708 |
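
# Illustrative inference sketch for the RegNet classifier above (not part of the file
# itself); assumes the public "facebook/regnet-y-040" checkpoint and a local image
# file named "cat.png".
import torch
from PIL import Image
from transformers import AutoImageProcessor, RegNetForImageClassification

image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
inputs = image_processor(images=Image.open("cat.png"), return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])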
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
'''simple docstring'''
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : List[str] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
SCREAMING_SNAKE_CASE_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ : Tuple = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
SCREAMING_SNAKE_CASE_ : str = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self , **UpperCamelCase__ ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __snake_case ( self , **UpperCamelCase__ ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __snake_case ( self , **UpperCamelCase__ ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Optional[int] = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
SCREAMING_SNAKE_CASE_ : int = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ : List[Any] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : int = image_processor(UpperCamelCase__ , return_tensors='np' )
SCREAMING_SNAKE_CASE_ : Dict = processor(images=UpperCamelCase__ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[str] = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = processor(text=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : int = tokenizer(UpperCamelCase__ , padding='max_length' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : str = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Optional[int] = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ : List[str] = processor.batch_decode(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Any = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : str = 'lower newer'
SCREAMING_SNAKE_CASE_ : List[str] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : List[Any] = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 97 | 0 |
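
# Illustrative sketch of the processor exercised by the tests above (not part of the
# test file); "kakaobrain/align-base" is the public ALIGN checkpoint.
from PIL import Image
from transformers import AlignProcessor

processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
image = Image.new("RGB", (224, 224))  # placeholder image for the sketch
batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# `batch` holds input_ids / token_type_ids / attention_mask / pixel_values,
# matching the keys asserted in the tests above.
print(sorted(batch.keys()))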
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generates a tuple of dummy DataLoaders over noisy samples of the line y = a * x + b."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for `num_epochs` and returns the per-epoch random draws."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model to do y = a * x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
"""simple docstring"""
def snake_case__ ( self ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ = DummyModel()
A__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ = dummy_dataloaders()
A__ = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE__ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE__ )
# Train baseline
A__ = Accelerator(project_config=SCREAMING_SNAKE_CASE__ )
A__ , A__ , A__ , A__ = accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def snake_case__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ = DummyModel()
A__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ = dummy_dataloaders()
# Train baseline
A__ = Accelerator()
A__ , A__ , A__ , A__ = accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save initial
A__ = os.path.join(SCREAMING_SNAKE_CASE__ , "initial" )
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
((A__) , (A__)) = model.a.item(), model.b.item()
A__ = optimizer.state_dict()
A__ = train(3 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
((A__) , (A__)) = model.a.item(), model.b.item()
A__ = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ = DummyModel()
A__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ = dummy_dataloaders()
A__ = Accelerator()
A__ , A__ , A__ , A__ = accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
accelerator.load_state(SCREAMING_SNAKE_CASE__ )
((A__) , (A__)) = model.a.item(), model.b.item()
A__ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = train(2 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save everything
A__ = os.path.join(SCREAMING_SNAKE_CASE__ , "checkpoint" )
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE__ )
test_rands += train(1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
((A__) , (A__)) = model.a.item(), model.b.item()
A__ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ = DummyModel()
A__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ = dummy_dataloaders()
A__ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE__ )
# Train baseline
A__ = Accelerator(project_dir=SCREAMING_SNAKE_CASE__ , project_config=SCREAMING_SNAKE_CASE__ )
A__ , A__ , A__ , A__ = accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save initial
accelerator.save_state()
((A__) , (A__)) = model.a.item(), model.b.item()
A__ = optimizer.state_dict()
A__ = train(3 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
((A__) , (A__)) = model.a.item(), model.b.item()
A__ = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ = DummyModel()
A__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ = dummy_dataloaders()
A__ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE__ )
A__ = Accelerator(project_dir=SCREAMING_SNAKE_CASE__ , project_config=SCREAMING_SNAKE_CASE__ )
A__ , A__ , A__ , A__ = accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE__ , "checkpoints" , "checkpoint_0" ) )
((A__) , (A__)) = model.a.item(), model.b.item()
A__ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = train(2 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE__ , "checkpoints" , "checkpoint_1" ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
((A__) , (A__)) = model.a.item(), model.b.item()
A__ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> str:
A__ = torch.tensor([1, 2, 3] )
A__ = torch.tensor([2, 3, 4] )
A__ = DummyModel()
A__ = torch.optim.Adam(net.parameters() )
A__ = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE__ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = str(ve.exception )
self.assertTrue("Item at index 0" in message )
self.assertTrue("Item at index 1" in message )
self.assertFalse("Item at index 2" in message )
self.assertFalse("Item at index 3" in message )
def snake_case__ ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ = DummyModel()
A__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE__ , step_size=1 , gamma=0.9_9 )
A__ , A__ = dummy_dataloaders()
A__ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE__ )
# Train baseline
A__ = Accelerator(project_dir=SCREAMING_SNAKE_CASE__ , project_config=SCREAMING_SNAKE_CASE__ )
A__ , A__ , A__ , A__ , A__ = accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save initial
accelerator.save_state()
A__ = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertNotEqual(SCREAMING_SNAKE_CASE__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE__ , "checkpoints" , "checkpoint_0" ) )
self.assertEqual(SCREAMING_SNAKE_CASE__ , scheduler.state_dict() )
def snake_case__ ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ = DummyModel()
A__ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE__ , total_limit=2 )
# Train baseline
A__ = Accelerator(project_dir=SCREAMING_SNAKE_CASE__ , project_config=SCREAMING_SNAKE_CASE__ )
A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , "checkpoints" , "checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , "checkpoints" , "checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , "checkpoints" , "checkpoint_10" ) ) )
@require_cuda
def snake_case__ ( self ) -> int:
A__ = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=os.environ.copy() )
if __name__ == "__main__":
UpperCamelCase = """/tmp/accelerate/state_checkpointing"""
UpperCamelCase = DummyModel()
UpperCamelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3)
UpperCamelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
UpperCamelCase , UpperCamelCase = dummy_dataloaders()
UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
UpperCamelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
UpperCamelCase , UpperCamelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
UpperCamelCase = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
UpperCamelCase = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
UpperCamelCase = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
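
# Minimal standalone sketch of the checkpointing API exercised above: everything
# passed through `accelerator.prepare` is round-tripped by save_state/load_state.
# The /tmp path is an assumption for the example.
import torch
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

project_config = ProjectConfiguration(project_dir="/tmp/ckpts", automatic_checkpoint_naming=True)
accelerator = Accelerator(project_config=project_config)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters())
model, optimizer = accelerator.prepare(model, optimizer)
accelerator.save_state()  # writes /tmp/ckpts/checkpoints/checkpoint_0
accelerator.load_state("/tmp/ckpts/checkpoints/checkpoint_0")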
| 104 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249,
        action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128,
        embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006,
        max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12,
        kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50_256,
        eos_token_id=50_256, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
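

# Quick sketch (illustrative, not part of the config file above): the attribute_map
# lets generic code read GPT-style names transparently.
config = TrajectoryTransformerConfig(n_embd=64, n_layer=2, n_head=2)
print(config.hidden_size, config.num_hidden_layers)  # 64 2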
| 388 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=32 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=16 , _lowerCAmelCase=[1, 2, 1] , _lowerCAmelCase=[2, 2, 4] , _lowerCAmelCase=2 , _lowerCAmelCase=2.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=10 , _lowerCAmelCase=8 , ) -> int:
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = embed_dim
_lowerCAmelCase = depths
_lowerCAmelCase = num_heads
_lowerCAmelCase = window_size
_lowerCAmelCase = mlp_ratio
_lowerCAmelCase = qkv_bias
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = drop_path_rate
_lowerCAmelCase = hidden_act
_lowerCAmelCase = use_absolute_embeddings
_lowerCAmelCase = patch_norm
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = initializer_range
_lowerCAmelCase = is_training
_lowerCAmelCase = scope
_lowerCAmelCase = use_labels
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = encoder_stride
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self ) -> str:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_lowerCAmelCase = SwinvaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase )
_lowerCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
_lowerCAmelCase = SwinvaForMaskedImageModeling(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase = 1
_lowerCAmelCase = SwinvaForMaskedImageModeling(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
_lowerCAmelCase = self.type_sequence_label_size
_lowerCAmelCase = SwinvaForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = SwinvaModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , embed_dim=37 )
def _snake_case ( self ) -> int:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self ) -> str:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
def _snake_case ( self ) -> Tuple:
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
def _snake_case ( self ) -> str:
pass
def _snake_case ( self ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowerCAmelCase )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
for model_class in self.all_model_classes:
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCAmelCase = outputs.attentions
_lowerCAmelCase = len(self.model_tester.depths )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase = True
_lowerCAmelCase = config.window_size**2
_lowerCAmelCase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
_lowerCAmelCase = len(_lowerCAmelCase )
# Check attention is always last and order is fine
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
if hasattr(self.model_tester , "num_hidden_states_types" ):
_lowerCAmelCase = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_lowerCAmelCase = 2
self.assertEqual(out_len + added_hidden_states , len(_lowerCAmelCase ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCAmelCase = outputs.hidden_states
_lowerCAmelCase = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# Swinv2 has a different seq_length
_lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase = outputs.reshaped_hidden_states
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = reshaped_hidden_states[0].shape
_lowerCAmelCase = (
reshaped_hidden_states[0].view(_lowerCAmelCase , _lowerCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_lowerCAmelCase = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> str:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = 3
_lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_lowerCAmelCase = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _snake_case ( self ) -> str:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = SwinvaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
@cached_property
def _snake_case ( self ) -> Union[str, Any]:
return (
AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 489 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
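# The _import_structure/_LazyModule pattern above defers the heavy torch import
# until an attribute is first accessed. Below is a minimal, self-contained
# sketch of the same idea via PEP 562 module-level __getattr__; the names are
# illustrative and this is not the real _LazyModule implementation. The "lazy"
# module here is simply stdlib json so the sketch runs anywhere:
import importlib

_lazy_attributes = {"json": ["dumps", "loads"]}

def __getattr__(name):
    # import the owning module only when one of its names is requested
    for module_name, names in _lazy_attributes.items():
        if name in names:
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")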
| 489 | 1 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """simple docstring"""

    def __init__(self , scheduler , optimizers , step_with_optimizer = True , split_batches = False ):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step(self , *args , **kwargs ):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args , **kwargs )
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args , **kwargs )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , '''total_steps''' ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args , **kwargs )
                else:
                    self.scheduler.step(*args , **kwargs )
    def get_last_lr(self ):
        return self.scheduler.get_last_lr()

    def state_dict(self ):
        return self.scheduler.state_dict()

    def load_state_dict(self , state_dict ):
        self.scheduler.load_state_dict(state_dict )

    def get_lr(self ):
        return self.scheduler.get_lr()

    def print_lr(self , *args , **kwargs ):
        return self.scheduler.print_lr(*args , **kwargs )
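# A self-contained sketch of the skip-aware stepping idea AcceleratedScheduler
# implements above: the LR schedule only advances when the optimizer actually
# stepped (e.g. not on gradient-accumulation micro-steps). _ToyOptimizer and
# _ToyScheduler are illustrative stand-ins, not the real accelerate objects.
class _ToyOptimizer:
    def __init__(self):
        self.step_was_skipped = False

class _ToyScheduler:
    def __init__(self):
        self.steps = 0
    def step(self):
        self.steps += 1

def _skip_aware_step(scheduler, optimizer):
    # mirror the core check in AcceleratedScheduler.step above
    if optimizer.step_was_skipped:
        return
    scheduler.step()

_opt, _sched = _ToyOptimizer(), _ToyScheduler()
_skip_aware_step(_sched, _opt)   # optimizer stepped -> schedule advances
_opt.step_was_skipped = True
_skip_aware_step(_sched, _opt)   # skipped step -> schedule unchanged
assert _sched.steps == 1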
| 14 |
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Add a vertex if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected, weighted edge between head and tail."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Remap edge weights so they are all distinct (required by Boruvka)."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])  # path compression
            return self.parent[item]

        def union(self, item_a, item_b):
            root_a = self.find(item_a)
            root_b = self.find(item_b)
            if root_a == root_b:
                return root_a
            if self.rank[root_a] > self.rank[root_b]:
                self.parent[root_b] = root_a
                return root_a
            if self.rank[root_a] < self.rank[root_b]:
                self.parent[root_a] = root_b
                return root_b
            if self.rank[root_a] == self.rank[root_b]:
                self.rank[root_a] += 1
                self.parent[root_b] = root_a
                return root_a
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Build a minimum spanning tree via Boruvka's algorithm."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set_a = union_find.find(head)
                set_b = union_find.find(tail)
                if set_a != set_b:
                    if cheap_edge[set_a] == -1 or cheap_edge[set_a][2] > weight:
                        cheap_edge[set_a] = [head, tail, weight]
                    if cheap_edge[set_b] == -1 or cheap_edge[set_b][2] > weight:
                        cheap_edge[set_b] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
| 691 | 0 |
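# A quick usage sketch for the Graph/Boruvka implementation above, on an
# illustrative four-vertex cycle. distinct_weight() is called first because
# Boruvka's algorithm assumes all edge weights are distinct:
g = Graph.build(vertices=[1, 2, 3, 4], edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 3)])
g.distinct_weight()
mst = Graph.boruvka_mst(g)
print(mst.get_edges())  # an MST over 4 vertices has 3 undirected edges (each listed both ways)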
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg , hint=None ):
    require_version(deps[pkg] , hint )
| 493 |
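# A self-contained sketch of the comparison that require_version_core enforces
# for a pin such as "tqdm>=4.27", using packaging (itself one of the hard
# runtime dependencies listed above). Illustrative helper, not the real
# implementation:
from packaging import version

def _version_satisfies(installed: str, minimum: str) -> bool:
    # parse both strings into Version objects so "4.10" > "4.9" compares correctly
    return version.parse(installed) >= version.parse(minimum)

assert _version_satisfies("4.40.0", "4.27")
assert not _version_satisfies("4.20.0", "4.27")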
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 493 | 1 |
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """A generic heap with a position map, so items can be updated or deleted by value."""

    def __init__(self, key: Callable | None = None):
        # Stores heap items as [item, score] pairs.
        self.arr = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis
        # ordering will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int):
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int):
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int):
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int):
        # First update the indexes of the items in the position map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(valid_parent, left):
            valid_parent = left
        if right is not None and not self._cmp(valid_parent, right):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index: int):
        parent = self._parent(index)
        while parent is not None and not self._cmp(parent, index):
            self._swap(parent, index)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int):
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        return self.arr[0] if self.size else None

    def extract_top(self):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def _lowerCAmelCase ( ) -> Any:
"""simple docstring"""
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
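# A short usage sketch for the Heap above. The default key is identity
# (min-heap); passing key=lambda x: -x would give max-heap behaviour, and the
# position map lets items be updated or deleted by value in O(log n):
h = Heap()
for value in (5, 1, 7, 3):
    h.insert_item(value, value)
assert h.get_top() == [1, 1]
h.update_item(7, 0)           # decrease 7's key below everything else
assert h.get_top() == [7, 0]
h.delete_item(7)
assert h.get_top() == [1, 1]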
| 405 |
def solution(n: int = 4_0_0_0_0_0_0) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 521 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    task_type: Optional[str] = field(
        default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'})
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    use_fast: bool = field(default=False , metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'})
    labels: Optional[str] = field(
        default=None , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
    max_seq_length: int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def main():
"""simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
' --overwrite_output_dir to overcome.' )
    module = import_module('tasks' )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
F"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray , label_ids: np.ndarray ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size , seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list , out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
            results.update(result )
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions , label_ids , metrics = trainer.predict(test_dataset )
        preds_list , _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , 'test_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , 'w' ) as writer:
                for key, value in metrics.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , 'test_predictions.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , 'w' ) as writer:
                with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
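# A tiny sanity check of the align_predictions logic above on toy numpy
# arrays: positions labelled with nn.CrossEntropyLoss().ignore_index (-100)
# are dropped before the seqeval metrics are computed. The label map here is
# illustrative:
import numpy as _np

_toy_label_map = {0: "O", 1: "B-PER"}
_toy_preds = _np.array([[[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]]])  # (batch=1, seq=3, labels=2)
_toy_label_ids = _np.array([[0, 1, -100]])                       # last position is padding

_toy_pred_ids = _np.argmax(_toy_preds, axis=2)
_aligned = [
    [_toy_label_map[p] for p, l in zip(_toy_pred_ids[0], _toy_label_ids[0]) if l != -100]
]
assert _aligned == [["O", "B-PER"]]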
| 365 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_70)
new_layer_name_dict = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
REMOTE_MODEL_PATHS = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type , use_small=False ):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]['file_name'] )


def _download(from_hf_hub , file_name ):
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_hub , filename=file_name , local_dir=CACHE_DIR )
def _load_model(ckpt_path , device , use_small=False , model_type="text" ):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path ):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`." )
        _download(model_info['repo_id'] , model_info['file_name'] )
    checkpoint = torch.load(ckpt_path , map_location=device )
    # this is a hack
    model_args = checkpoint['model_args']
    if "input_vocab_size" not in model_args:
        model_args['input_vocab_size'] = model_args['vocab_size']
        model_args['output_vocab_size'] = model_args['vocab_size']
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args['num_heads'] = model_args.pop('n_head' )
    model_args['hidden_size'] = model_args.pop('n_embd' )
    model_args['num_layers'] = model_args.pop('n_layer' )
    model_config = ConfigClass(**checkpoint['model_args'] )
    model = ModelClass(config=model_config )
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint['model']
    # fixup checkpoint
    unwanted_prefix = '_orig_mod.'
    for k, v in list(state_dict.items() ):
        if k.startswith(unwanted_prefix ):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix ) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name , new_layer_name_dict[old_layer_name] )
            state_dict[new_k] = state_dict.pop(k )
    extra_keys = set(state_dict.keys() ) - set(model.state_dict().keys() )
    extra_keys = {k for k in extra_keys if not k.endswith('.attn.bias' )}
    missing_keys = set(model.state_dict().keys() ) - set(state_dict.keys() )
    missing_keys = {k for k in missing_keys if not k.endswith('.attn.bias' )}
    if len(extra_keys ) != 0:
        raise ValueError(f"extra keys found: {extra_keys}" )
    if len(missing_keys ) != 0:
        raise ValueError(f"missing keys: {missing_keys}" )
    model.load_state_dict(state_dict , strict=False )
    n_params = model.num_parameters(exclude_embeddings=True )
    val_loss = checkpoint['best_val_loss'].item()
    logger.info(f"model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss , 3 )} loss" )
    model.eval()
    model.to(device )
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path , use_small=False , model_type="text" ):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = 'cpu'  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type , use_small=use_small )
    model = _load_model(ckpt_path , device , model_type=model_type , use_small=use_small )
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path , 'cpu' , model_type=model_type , use_small=use_small )
    if model_type == "text":
        bark_model = bark_model['model']
    if model.num_parameters(exclude_embeddings=True ) != bark_model.get_num_params():
        raise ValueError('initial and new models don\'t have the same number of parameters' )
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
        output_old_model = bark_model(vec )[0]
        output_new_model_total = model(vec )
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        output_new_model_total = model(prediction_codebook_channel , vec )
        output_old_model = bark_model(prediction_codebook_channel , vec )
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError('initial and new outputs don\'t have the same shape' )
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError('initial and new outputs are not equal' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
def load_whole_bark_model(semantic_path , coarse_path , fine_path , append_text , hub_path , folder_path , ):
    pytorch_dump_folder_path = os.path.join(folder_path , append_text )
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path , 'config.json' ) )
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path , 'config.json' ) )
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path , 'config.json' ) )
    codecConfig = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
    semantic = BarkSemanticModel.from_pretrained(semantic_path )
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path )
    fineAcoustic = BarkFineModel.from_pretrained(fine_path )
    codec = EncodecModel.from_pretrained('facebook/encodec_24khz' )
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig , coarseAcousticConfig , fineAcousticConfig , codecConfig )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    bark = BarkModel(bark_config )
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    bark.save_pretrained(pytorch_dump_folder_path , repo_id=hub_path , push_to_hub=True )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
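# A standalone illustration of the checkpoint-key fixup performed inside
# _load_model above: strip the torch.compile "_orig_mod." prefix, then map the
# GPT-style layer names onto the HF Bark names. Only a subset of
# new_layer_name_dict is used here:
_toy_state = {"_orig_mod.transformer.h.0.ln_1.weight": 0}
_toy_map = {"transformer.": "", "h.": "layers.", "ln_1": "layernorm_1"}
_fixed = {}
for _k, _v in _toy_state.items():
    if _k.startswith("_orig_mod."):
        _k = _k[len("_orig_mod.") :]
    for _old, _new in _toy_map.items():
        _k = _k.replace(_old, _new)
    _fixed[_k] = _v
assert _fixed == {"layers.0.layernorm_1.weight": 0}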
| 365 | 1 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=False , vocab_size=19 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self ):
        config = EsmConfig(
            vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=True , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
        return config

    def create_and_check_model(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmForProteinFolding(config=config ).float()
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )

    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self ):
        self.model_tester = EsmFoldModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )

    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip('''Does not support attention outputs''' )
def a__ (self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip
def a__ (self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def a__ (self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def a__ (self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ (self ) -> int:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ (self ) -> str:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def a__ (self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold only has one output format.''' )
def a__ (self ) -> Dict:
"""simple docstring"""
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def a__ (self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def a__ (self ) -> str:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def a__ (self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def a__ (self ) -> str:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def a__ (self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus ):
    @slow
    def test_inference_protein_folding(self ):
        model = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        position_outputs = model(input_ids )['positions']
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.float32 )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1E-4 ) )
| 11 |
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6'))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_of_the_way = len(cc_number) - 2
    for i in range(half_of_the_way, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(f'{error_message} it has nonnumerical characters.')
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f'{error_message} of its length.')
        return False
    if not validate_initial_digits(credit_card_number):
        print(f'{error_message} of its first two digits.')
        return False
    if not luhn_validation(credit_card_number):
        print(f'{error_message} it fails the Luhn check.')
        return False
    print(f'{credit_card_number} is a valid credit card number.')
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 11 | 1 |
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" )
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
__lowerCamelCase : List[str] = """0094702343221"""
print(is_sri_lankan_phone_number(phone))
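# Quick spot checks against the pattern above: the prefix may be 0, 94, +94 or
# 0094; the digit after the leading 7 must be 0-2 or 4-8; an optional "-" or
# space may precede the final seven digits.
assert is_sri_lankan_phone_number("0712345678")
assert is_sri_lankan_phone_number("+9471-2345678")
assert not is_sri_lankan_phone_number("0792345678")  # 9 is not a valid operator digit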
| 701 |
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError("""int() can't convert non-string with explicit base""")
    if num < 0:
        raise ValueError("""parameter must be positive int""")
    if isinstance(base, str):
        raise TypeError("""'str' object cannot be interpreted as an integer""")
    if isinstance(base, float):
        raise TypeError("""'float' object cannot be interpreted as an integer""")
    if base in (0, 1):
        raise ValueError("""base must be >= 2""")
    if base > 36:
        raise ValueError("""base must be <= 36""")
    new_value = """"""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(10_00):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
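# The self-test above relies on int(value, base) for the inverse mapping; a
# worked version of that inverse via positional expansion:
def any_to_decimal(value: str, base: int) -> int:
    digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    total = 0
    for char in value:
        total = total * base + digits.index(char)
    return total

assert any_to_decimal("FF", 16) == 255
assert any_to_decimal(decimal_to_any(1000, 36), 36) == 1000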
| 448 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name ):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
    if "img_encoder.layers" in name:
        name = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
    if "blocks" in name and "res" not in name:
        name = name.replace('blocks' , 'layers' )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('attn' , 'self_attn' )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('proj' , 'out_proj' )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layer_norm1' )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('norm2' , 'layer_norm2' )
    if "img_encoder.norm" in name:
        name = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
    if "text_encoder.positional_embedding" in name:
        name = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
    if "ln_1" in name:
        name = name.replace('ln_1' , 'layer_norm1' )
    if "ln_2" in name:
        name = name.replace('ln_2' , 'layer_norm2' )
    if "c_fc" in name:
        name = name.replace('c_fc' , 'fc1' )
    if "c_proj" in name:
        name = name.replace('c_proj' , 'fc2' )
    if "text_encoder" in name:
        name = name.replace('text_encoder' , 'text_model' )
    if "ln_final" in name:
        name = name.replace('ln_final' , 'final_layer_norm' )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
    if "img_projector.linear_out." in name:
        name = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
    if "text_projector.linear_hidden" in name:
        name = name.replace('text_projector.linear_hidden' , 'text_projection' )
    if "text_projector.linear_out" in name:
        name = name.replace('text_projector.linear_out' , 'text_projection.3' )
    return name
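# Spot check: with the replacements chaining on `name`, a representative
# vision-encoder checkpoint key (illustrative) maps as expected:
assert rename_key("img_encoder.layers.0.blocks.1.norm1.weight") == (
    "vision_model.encoder.stages.0.layers.1.layer_norm1.weight"
)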
def convert_state_dict(orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
            key_split = key.split('.' )
            stage_num , layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
if "weight" in key:
SCREAMING_SNAKE_CASE = val[:dim, :]
SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE = val[:dim]
SCREAMING_SNAKE_CASE = val[dim : dim * 2]
SCREAMING_SNAKE_CASE = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
            key_split = key.split('.' )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
if "weight" in key:
SCREAMING_SNAKE_CASE = val[:dim, :]
SCREAMING_SNAKE_CASE = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE = val[:dim]
SCREAMING_SNAKE_CASE = val[dim : dim * 2]
SCREAMING_SNAKE_CASE = val[-dim:]
else:
            new_name = rename_key(key )
# squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
return orig_state_dict
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
    image = prepare_img()
    inputs = processor(text=['a photo of a cat', 'a photo of a dog'] , images=image , padding=True , return_tensors='pt' )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.35_23, 6.36_29]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.18_73, 8.62_30]] )
    else:
        raise ValueError(F'Model name {model_name} not supported.' )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print('Successfully saved processor and model to' , pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing to the hub...' )
        processor.push_to_hub(model_name , organization='nielsr' )
        model.push_to_hub(model_name , organization='nielsr' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 247 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self , tokenizer ):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def roberta_dict_integration_testing(self ):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=True ) , [0, 31_414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=True ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding( self ):
        tokenizer = self.get_tokenizer()
        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )
        tokenizer.add_special_tokens({'bos_token': '<s>'} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )
        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask , lstrip=True , rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
    def test_pretokenized_inputs( self ):
        pass
    def test_embeded_special_tokens( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    tokens_r_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
    def test_change_add_prefix_space_and_trim_offsets_args( self ):
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['add_prefix_space'] , add_prefix_space )
            self.assertEqual(post_processor_state['add_prefix_space'] , add_prefix_space )
            self.assertEqual(post_processor_state['trim_offsets'] , trim_offsets )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments( self ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = F' {text}'
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
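# A standalone sketch (kept as a comment so the test module stays import-safe;
# it downloads the real allenai checkpoint when run) of the trim_offsets behaviour
# the last test exercises:
# tok = LongformerTokenizerFast.from_pretrained('allenai/longformer-base-4096' )
# enc = tok('hello hello' , return_offsets_mapping=True , add_special_tokens=False )
# print(enc.offset_mapping )  # [(0, 5), (6, 11)] with the default trim_offsets=True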
| 247 | 1 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['pixel_values']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = IMAGENET_DEFAULT_MEAN , image_std = IMAGENET_DEFAULT_STD , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        size_dict = get_size_dict(size , default_to_square=False )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size['''shortest_edge'''] )
            output_size = get_resize_output_image_size(image , size=shortest_edge , default_to_square=False )
            size_dict = {'''height''': output_size[0], '''width''': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
        return resize(
            image , size=(size_dict['''height'''], size_dict['''width''']) , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image , size , resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image , crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image , rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image , image_mean , image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
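# A minimal usage sketch (assumes Pillow is installed; the file name is hypothetical):
# from PIL import Image
# processor = LevitImageProcessor()
# batch = processor.preprocess(Image.open('''cat.png''' ) , return_tensors='''np''' )
# print(batch['''pixel_values'''].shape )  # (1, 3, 224, 224) after resize + center crop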
 | 718 |
'''simple docstring'''
from __future__ import annotations
graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
class Graph:
    '''simple docstring'''
    def __init__( self , graph , source_vertex ) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex
    def breadth_first_search( self ) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path( self , target_vertex ) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + F'''->{target_vertex}'''
if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breadth_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    print(g.shortest_path('Foo'))  # raises ValueError: no path from 'G' to 'Foo'
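# Design note: list.pop(0) is O(n); for large graphs, collections.deque gives O(1)
# pops from the left. A drop-in sketch for the loop inside breadth_first_search:
# from collections import deque
# queue = deque([self.source_vertex])
# while queue:
#     vertex = queue.popleft()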
| 174 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config( self , **kwargs ):
        config = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
        }
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **config ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(residual , t , output , **kwargs ).prev_sample
                new_output = new_scheduler.step(residual , t , new_output , **kwargs ).prev_sample
                assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , '''set_timesteps''' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , '''set_timesteps''' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_switch( self ):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3
    def test_timesteps( self ):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type='''deis''' , solver_order=order , solver_type=solver_type , )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_solver_order_and_type( self ):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        sample = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"
    def test_lower_order_final( self ):
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
    def test_inference_steps( self ):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type='''v_prediction''' )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.091 ) < 1e-3
    def test_fp16_support( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
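# A minimal standalone sketch of the sampling loop these tests exercise, with a
# random tensor standing in for a real UNet's noise prediction:
# from diffusers import DEISMultistepScheduler
# sched = DEISMultistepScheduler(num_train_timesteps=1000 )
# sched.set_timesteps(10 )
# sample = torch.randn(1 , 3 , 8 , 8 )
# for t in sched.timesteps:
#     noise_pred = torch.randn_like(sample )  # stand-in for model(sample, t)
#     sample = sched.step(noise_pred , t , sample ).prev_sample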
| 493 |
edges = {'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []}
vertices = ['''a''', '''b''', '''c''', '''d''', '''e''']
def topological_sort(start , visited , sort ):
    '''simple docstring'''
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort('''a''', [], [])
    print(sort)
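    # Worked example: for the DAG above this prints ['c', 'd', 'e', 'b', 'a'];
    # every edge u -> v has v appearing before u, so reading the list right to
    # left gives a valid topological order.
    assert sort == ['''c''', '''d''', '''e''', '''b''', '''a''']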
| 493 | 1 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel ( ModelMixin ):
    '''simple docstring'''
    def __init__( self , controlnets ) -> None:
        super().__init__()
        self.nets = nn.ModuleList(controlnets )
    def forward( self , sample , timestep , encoder_hidden_states , controlnet_cond , conditioning_scale , class_labels = None , timestep_cond = None , attention_mask = None , cross_attention_kwargs = None , guess_mode = False , return_dict = True , ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples, mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained( self , save_directory , is_main_process = True , save_function = None , safe_serialization = False , variant = None , ) -> None:
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + F'''_{idx}'''
    @classmethod
    def from_pretrained( cls , pretrained_model_path , **kwargs ):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + F'''_{idx}'''
        logger.info(F'''{len(controlnets )} controlnets loaded from {pretrained_model_path}.''' )
        if len(controlnets ) == 0:
            raise ValueError(
                F'''No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + "_0"}.''' )
        return cls(controlnets )
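# A minimal usage sketch (the two checkpoint ids are real hub repos, but any
# ControlNetModel instances work; weights are large):
# nets = MultiControlNetModel([
#     ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' ),
#     ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-depth''' ),
# ])
# nets.save_pretrained('''./multi_controlnet/controlnet''' )  # writes controlnet, controlnet_1, ...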
| 607 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
lowercase = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
lowercase = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
lowercase = BeautifulSoup(res.text, "html.parser")
lowercase = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f'https://google.com{link.get("href")}')
| 607 | 1 |
"""simple docstring"""
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
UpperCAmelCase_ : List[str] = """Usage of script: script_name <size_of_canvas:int>"""
UpperCAmelCase_ : List[Any] = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int ) -> list[list[bool]]:
    """simple docstring"""
    canvas = [[False for i in range(size )] for j in range(size )]
    return canvas
def seed(canvas: list[list[bool]] ) -> None:
    """simple docstring"""
    for i, row in enumerate(canvas ):
        for j, _ in enumerate(row ):
            canvas[i][j] = bool(random.getrandbits(1 ) )
def run(canvas: list[list[bool]] ) -> list[list[bool]]:
    """simple docstring"""
    current_canvas = np.array(canvas )
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(current_canvas ):
        for c, pt in enumerate(row ):
            next_gen_canvas[r][c] = __judge_point(
                pt , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool , neighbours: list[list[bool]] ) -> bool:
    """simple docstring"""
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
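# A quick sanity sketch (commented out; not part of the script's CLI flow): a
# horizontal blinker should flip to a vertical one after a single generation.
# blinker = [[False] * 5 for _ in range(5)]
# blinker[2][1] = blinker[2][2] = blinker[2][3] = True
# after = run(blinker)
# assert after[1][2] and after[2][2] and after[3][2]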
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 512 |
import math
def res(x: int , y: int ) -> float:
    if 0 not in (x, y):
        # Compare via logarithms: log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("""This should never happen""" )
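# Worked example: comparing 2^100 with 100^2 via logarithms instead of full evaluation:
# res(2, 100) = 100 * log10(2) ~ 30.10   (so 2^100 has 31 digits)
# res(100, 2) = 2 * log10(100) = 4.0     (100^2 = 10000)
# The larger log means 2^100 is the larger number.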
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
UpperCamelCase = "Enter the base and the power separated by a comma: "
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
UpperCamelCase = res(xa, ya)
UpperCamelCase = res(xa, ya)
# We check for the largest number
if resa > resa:
print("Largest number is", xa, "^", ya)
elif resa > resa:
print("Largest number is", xa, "^", ya)
else:
print("Both are equal") | 45 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
lowercase : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/electra-small-generator""": 5_1_2,
"""google/electra-base-generator""": 5_1_2,
"""google/electra-large-generator""": 5_1_2,
"""google/electra-small-discriminator""": 5_1_2,
"""google/electra-base-discriminator""": 5_1_2,
"""google/electra-large-discriminator""": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
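# A minimal usage sketch (real hub checkpoint; ELECTRA reuses a BERT-style WordPiece
# vocabulary, so the ids in the comment are what that vocab typically yields):
# tok = ElectraTokenizerFast.from_pretrained("""google/electra-small-discriminator""")
# print(tok("""hello world""").input_ids )  # e.g. [101, 7592, 2088, 102], i.e. [CLS] hello world [SEP]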
 | 584 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 584 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    '''simple docstring'''
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , attention_window=4 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        inputs_dict = prepare_led_inputs_dict(config , input_ids , decoder_input_ids )
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids )[:, :-1], tf.ones_like(input_ids )[:, -1:]] , axis=-1 , )
        inputs_dict['''global_attention_mask'''] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        """simple docstring"""
        model = TFLEDModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3 )
def prepare_led_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
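# A quick sketch of what prepare_led_inputs_dict produces for a toy config
# (values illustrative):
# cfg = LEDConfig(vocab_size=99 , d_model=32 , pad_token_id=1 )
# batch = prepare_led_inputs_dict(cfg , tf.constant([[5, 6, 1]] ) , tf.constant([[2, 5, 6]] ) )
# print(batch["attention_mask"] )  # [[1, 1, 0]]: pad positions are masked out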
@require_tf
class TFLEDModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFLEDForConditionalGeneration,
            '''feature-extraction''': TFLEDModel,
            '''summarization''': TFLEDForConditionalGeneration,
            '''text2text-generation''': TFLEDForConditionalGeneration,
            '''translation''': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFLEDModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LEDConfig )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_attention_outputs( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict['''global_attention_mask'''] = tf.zeros_like(inputs_dict['''attention_mask'''] )
        num_global_attn_indices = 2
        inputs_dict['''global_attention_mask'''] = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(outputs ):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
        def check_encoder_attentions_output(outputs ):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(global_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''use_cache'''] = False
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict['''output_attentions'''] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
    @unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
    def test_saved_model_creation( self ):
        """simple docstring"""
        pass
    def test_generate_with_headmasking( self ):
        """simple docstring"""
        # TODO: Head-masking not yet implemented
        pass
def _long_tensor(tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
TOLERANCE = 1E-4
@slow
@require_tf
class TFLEDModelIntegrationTest ( unittest.TestCase ):
    '''simple docstring'''
    def test_inference_no_head( self ):
        """simple docstring"""
        model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3 )
    def test_inference_with_head( self ):
        """simple docstring"""
        model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3 , rtol=1E-3 )
 | 108 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor (ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'OwlViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning ,)
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , query_images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.' )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            else:
                raise ValueError('Target return tensor type could not be returned' )
            encoding = BatchEncoding()
            encoding['input_ids'] = input_ids
            encoding['attention_mask'] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding['query_pixel_values'] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
| 335 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    """simple docstring"""
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
) | 11 |
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """simple docstring"""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
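

# A number n passes this check exactly when n = m * (m - 1) for some power of
# two m, e.g. 2 = 2 * 1, 12 = 4 * 3, 56 = 8 * 7.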
def solution(max_proportion: float = 1 / 12345) -> int:
    """simple docstring"""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
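        # e.g. integer = 3 gives (9 - 1) / 4 = 2.0 (an integer), while
        # integer = 4 gives (16 - 1) / 4 = 3.75 (no partition)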
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 1 |
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))")) | 32 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 603 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True)

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, 'use_cache'):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, 'saved_model', '1')
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs['encoder_hidden_states']
                    output_attentions = outputs['encoder_attentions']
                else:
                    output_hidden_states = outputs['hidden_states']
                    output_attentions = outputs['attentions']

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, 'decoder_seq_length', self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, 'key_length', decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['use_cache'] = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 712 |
"""simple docstring"""
def factorial(num: int) -> int:
    '''simple docstring'''
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    '''simple docstring'''
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    '''simple docstring'''
    fact = factorial(num)
    result = split_and_add(fact)
    return result
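

# e.g. solution(10) == 27, since 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27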
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 659 | 0 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial() -> None:
    '''simple docstring'''
    plt.scatter(X, y, color='red')
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color='blue')
plt.title('''Truth or Bluff (Linear Regression)''' )
plt.xlabel('''Position level''' )
plt.ylabel('''Salary''' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 432 |
'''simple docstring'''
def reverse_words(input_str: str) -> str:
    '''simple docstring'''
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 432 | 1 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
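# e.g. the first iteration (i=0, j=0) pairs the SD prefix "input_blocks.1.0."
# with the HF prefix "down_blocks.0.resnets.0."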
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
# buyer beware: this is a *brittle* function,
# and correct output requires that all of these pieces interact in
# the exact order in which I have arranged them.
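    # e.g. the HF key "down_blocks.0.resnets.0.norm1.weight" ends up as the
    # SD key "input_blocks.1.0.in_layers.0.weight" after the two passes below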
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
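

# e.g. a [512, 512] linear weight becomes a [512, 512, 1, 1] conv kernel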
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['q', 'k', 'v', 'proj_out']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'q': 0, 'k': 1, 'v': 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
    args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
    vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
    text_enc_path = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device='cpu')
    else:
        unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
        unet_state_dict = torch.load(unet_path, map_location='cpu')
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device='cpu')
    else:
        vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
        vae_state_dict = torch.load(vae_path, map_location='cpu')
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device='cpu')
    else:
        text_enc_path = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
        text_enc_dict = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
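    # (the OpenCLIP text encoder used by SD 2.x has roughly twice as many layers
    # as the 12-layer CLIP encoder in SD 1.x, so layer index 22 only exists in 2.x)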
    is_v20_model = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {'transformer.' + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {'state_dict': state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 107 | import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self):
        return {'do_resize': self.do_resize, 'size': self.size, 'apply_ocr': self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'apply_ocr'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='pt')
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_layoutlmv3_integration_test(self):
# with apply_OCR = True
SCREAMING_SNAKE_CASE__: int= LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE__: int= load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
SCREAMING_SNAKE_CASE__: str= Image.open(ds[0]['''file'''] ).convert('''RGB''' )
SCREAMING_SNAKE_CASE__: str= image_processing(lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE__: Dict= [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
SCREAMING_SNAKE_CASE__: List[Any]= [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 
788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCAmelCase )
self.assertListEqual(encoding.boxes , lowerCAmelCase )
# with apply_OCR = False
SCREAMING_SNAKE_CASE__: int= LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 107 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
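
# For example, assuming `accelerate config` has been run and this file is saved
# as local_sgd.py (the file name here is just an assumption):
#
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 2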
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
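        # e.g. under fp16 a batch whose longest sequence is 61 tokens is padded to 64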
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowerCamelCase ) == "1":
__magic_name__ : Tuple =2
# New Code #
__magic_name__ : List[Any] =int(args.gradient_accumulation_steps )
__magic_name__ : int =int(args.local_sgd_steps )
# Initialize accelerator
__magic_name__ : List[str] =Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowerCamelCase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ : List[str] =config["""lr"""]
__magic_name__ : Dict =int(config["""num_epochs"""] )
__magic_name__ : Optional[int] =int(config["""seed"""] )
__magic_name__ : Tuple =int(config["""batch_size"""] )
__magic_name__ : Dict =evaluate.load("""glue""" , """mrpc""" )
set_seed(lowerCamelCase )
__magic_name__ , __magic_name__ : Union[str, Any] =get_dataloaders(lowerCamelCase , lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ : str =AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__magic_name__ : List[str] =model.to(accelerator.device )
# Instantiate optimizer
__magic_name__ : Tuple =AdamW(params=model.parameters() , lr=lowerCamelCase )
# Instantiate scheduler
__magic_name__ : Dict =get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[Any] =accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Now we train the model
for epoch in range(lowerCamelCase ):
model.train()
with LocalSGD(
accelerator=lowerCamelCase , model=lowerCamelCase , local_sgd_steps=lowerCamelCase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowerCamelCase ):
__magic_name__ : Tuple =model(**lowerCamelCase )
__magic_name__ : Optional[int] =output.loss
accelerator.backward(lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
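                    # Under LocalSGD, parameters are synchronized across workers only every
                    # `local_sgd_steps` optimizer steps (rather than after every step),
                    # which reduces inter-worker communication between syncs.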
model.eval()
for step, batch in enumerate(lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ : int =model(**lowerCamelCase )
__magic_name__ : int =outputs.logits.argmax(dim=-1 )
__magic_name__ , __magic_name__ : Optional[int] =accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
__magic_name__ : Tuple =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , lowerCamelCase )
def lowerCAmelCase_ ( ):
__magic_name__ : List[str] =argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """ between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """ and an Nvidia Ampere GPU.""" , )
    # New Code #
    parser.add_argument(
        """--gradient_accumulation_steps""" , type=int , default=1 , help="""The number of minibatches to be run before gradients are accumulated.""" , )
    parser.add_argument(
        """--local_sgd_steps""" , type=int , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
__magic_name__ : Tuple =parser.parse_args()
__magic_name__ : Any ={"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase , lowerCamelCase )
if __name__ == "__main__":
main()
| 21 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    """simple docstring"""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
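

# A quick sanity check of the randomized quicksort above (a minimal sketch,
# separate from the benchmark that follows):
_demo = [5, 3, 8, 1, 9, 2]
_in_place_quick_sort(_demo, 0, len(_demo) - 1)
assert _demo == [1, 2, 3, 5, 8, 9]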
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted

mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution'
    ' is :'
)
print(z) | 433 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
A__ : Dict = logging.get_logger(__name__)
class lowercase__ ( snake_case__ ):
def __init__( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
warnings.warn(
"The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DPTImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
| 244 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :BigBirdConfig
_UpperCAmelCase :jnp.dtype = jnp.floataa
_UpperCAmelCase :bool = True
def UpperCAmelCase__ ( self : Union[str, Any] ):
super().setup()
lowerCamelCase_ : List[Any] =nn.Dense(5 , dtype=self.dtype )
def __call__( self : str , *snake_case__ : Optional[int] , **snake_case__ : List[str] ):
lowerCamelCase_ : List[str] =super().__call__(*snake_case__ , **snake_case__ )
lowerCamelCase_ : Any =self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :str = FlaxBigBirdForNaturalQuestionsModule
def _snake_case ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] ) -> Dict:
def cross_entropy(lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=None ):
lowerCamelCase_ : List[str] =logits.shape[-1]
lowerCamelCase_ : Tuple =(labels[..., None] == jnp.arange(lowerCamelCase__ )[None]).astype("f4" )
lowerCamelCase_ : Any =jax.nn.log_softmax(lowerCamelCase__ , axis=-1 )
lowerCamelCase_ : str =-jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
lowerCamelCase_ : int =reduction(lowerCamelCase__ )
return loss
lowerCamelCase_ : str =partial(lowerCamelCase__ , reduction=jnp.mean )
lowerCamelCase_ : Union[str, Any] =cross_entropy(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : int =cross_entropy(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : int =cross_entropy(lowerCamelCase__ , lowerCamelCase__ )
return (start_loss + end_loss + pooled_loss) / 3
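

# The QA head above predicts start/end token positions plus a 5-way answer
# category (the `nn.Dense(5)` classifier), so the training loss averages three
# equally weighted cross-entropy terms: start, end, and pooled category.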
@dataclass
class lowercase__ :
_UpperCAmelCase :str = "google/bigbird-roberta-base"
_UpperCAmelCase :int = 3000
_UpperCAmelCase :int = 10500
_UpperCAmelCase :int = 128
_UpperCAmelCase :int = 3
_UpperCAmelCase :int = 1
_UpperCAmelCase :int = 5
# tx_args
_UpperCAmelCase :float = 3e-5
_UpperCAmelCase :float = 0.0
_UpperCAmelCase :int = 20000
_UpperCAmelCase :float = 0.00_95
_UpperCAmelCase :str = "bigbird-roberta-natural-questions"
_UpperCAmelCase :str = "training-expt"
_UpperCAmelCase :str = "data/nq-training.jsonl"
_UpperCAmelCase :str = "data/nq-validation.jsonl"
def UpperCAmelCase__ ( self : List[Any] ):
os.makedirs(self.base_dir , exist_ok=snake_case__ )
lowerCamelCase_ : List[Any] =os.path.join(self.base_dir , self.save_dir )
lowerCamelCase_ : str =self.batch_size_per_device * jax.device_count()
@dataclass
class lowercase__ :
_UpperCAmelCase :int
_UpperCAmelCase :int = 4096 # no dynamic padding on TPUs
def __call__( self : int , snake_case__ : str ):
lowerCamelCase_ : Any =self.collate_fn(snake_case__ )
lowerCamelCase_ : Tuple =jax.tree_util.tree_map(snake_case__ , snake_case__ )
return batch
def UpperCAmelCase__ ( self : Dict , snake_case__ : Any ):
lowerCamelCase_ , lowerCamelCase_ : Any =self.fetch_inputs(features["input_ids"] )
lowerCamelCase_ : Optional[int] ={
"input_ids": jnp.array(snake_case__ , dtype=jnp.intaa ),
"attention_mask": jnp.array(snake_case__ , dtype=jnp.intaa ),
"start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa ),
"end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa ),
"pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa ),
}
return batch
def UpperCAmelCase__ ( self : List[str] , snake_case__ : list ):
lowerCamelCase_ : Dict =[self._fetch_inputs(snake_case__ ) for ids in input_ids]
return zip(*snake_case__ )
def UpperCAmelCase__ ( self : List[str] , snake_case__ : list ):
lowerCamelCase_ : Any =[1 for _ in range(len(snake_case__ ) )]
while len(snake_case__ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
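
# A minimal sketch of what the padding above produces (hypothetical values,
# assuming max_length=8 and pad_id=0):
#   [101, 2054, 102] -> ids  [101, 2054, 102, 0, 0, 0, 0, 0]
#                      mask [1,   1,    1,   0, 0, 0, 0, 0]
# i.e. every sequence gets a static shape so TPU compilation is not retriggered
# per batch.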
def _snake_case ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str=None ) -> Tuple:
if seed is not None:
lowerCamelCase_ : List[Any] =dataset.shuffle(seed=lowerCamelCase__ )
for i in range(len(lowerCamelCase__ ) // batch_size ):
lowerCamelCase_ : Any =dataset[i * batch_size : (i + 1) * batch_size]
yield dict(lowerCamelCase__ )
@partial(jax.pmap , axis_name="batch" )
def _snake_case ( lowerCamelCase__ : str , lowerCamelCase__ : Tuple , **lowerCamelCase__ : int ) -> str:
def loss_fn(lowerCamelCase__ : Optional[int] ):
lowerCamelCase_ : Any =model_inputs.pop("start_labels" )
lowerCamelCase_ : Dict =model_inputs.pop("end_labels" )
lowerCamelCase_ : Union[str, Any] =model_inputs.pop("pooled_labels" )
lowerCamelCase_ : Optional[Any] =state.apply_fn(**lowerCamelCase__ , params=lowerCamelCase__ , dropout_rng=lowerCamelCase__ , train=lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Tuple =outputs
return state.loss_fn(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , )
lowerCamelCase_ , lowerCamelCase_ : Any =jax.random.split(lowerCamelCase__ )
lowerCamelCase_ : str =jax.value_and_grad(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =grad_fn(state.params )
lowerCamelCase_ : int =jax.lax.pmean({"loss": loss} , axis_name="batch" )
lowerCamelCase_ : Optional[Any] =jax.lax.pmean(lowerCamelCase__ , "batch" )
lowerCamelCase_ : str =state.apply_gradients(grads=lowerCamelCase__ )
return state, metrics, new_drp_rng
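
# `jax.pmap` with axis_name="batch" replicates the step above across devices;
# `jax.lax.pmean` averages both the loss and the gradients over that axis
# before the parameter update, keeping the replicas in sync.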
@partial(jax.pmap , axis_name="batch" )
def _snake_case ( lowerCamelCase__ : int , **lowerCamelCase__ : str ) -> List[str]:
lowerCamelCase_ : List[str] =model_inputs.pop("start_labels" )
lowerCamelCase_ : str =model_inputs.pop("end_labels" )
lowerCamelCase_ : int =model_inputs.pop("pooled_labels" )
lowerCamelCase_ : List[Any] =state.apply_fn(**lowerCamelCase__ , params=state.params , train=lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : List[Any] =outputs
lowerCamelCase_ : List[Any] =state.loss_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : Union[str, Any] =jax.lax.pmean({"loss": loss} , axis_name="batch" )
return metrics
class lowercase__ ( train_state.TrainState ):
_UpperCAmelCase :Callable = struct.field(pytree_node=snake_case__ )
@dataclass
class lowercase__ :
_UpperCAmelCase :Args
_UpperCAmelCase :Callable
_UpperCAmelCase :Callable
_UpperCAmelCase :Callable
_UpperCAmelCase :Callable
_UpperCAmelCase :wandb
_UpperCAmelCase :Callable = None
def UpperCAmelCase__ ( self : str , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : List[Any]=None ):
lowerCamelCase_ : Union[str, Any] =model.params
lowerCamelCase_ : Tuple =TrainState.create(
apply_fn=model.__call__ , params=snake_case__ , tx=snake_case__ , loss_fn=snake_case__ , )
if ckpt_dir is not None:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =restore_checkpoint(snake_case__ , snake_case__ )
lowerCamelCase_ : Optional[Any] ={
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
lowerCamelCase_ , lowerCamelCase_ : List[str] =build_tx(**snake_case__ )
lowerCamelCase_ : str =train_state.TrainState(
step=snake_case__ , apply_fn=model.__call__ , params=snake_case__ , tx=snake_case__ , opt_state=snake_case__ , )
lowerCamelCase_ : Any =args
lowerCamelCase_ : Optional[Any] =data_collator
lowerCamelCase_ : int =lr
lowerCamelCase_ : List[Any] =params
lowerCamelCase_ : Tuple =jax_utils.replicate(snake_case__ )
return state
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ):
lowerCamelCase_ : Union[str, Any] =self.args
lowerCamelCase_ : List[Any] =len(snake_case__ ) // args.batch_size
lowerCamelCase_ : Tuple =jax.random.PRNGKey(0 )
lowerCamelCase_ : int =jax.random.split(snake_case__ , jax.device_count() )
for epoch in range(args.max_epochs ):
lowerCamelCase_ : str =jnp.array(0 , dtype=jnp.floataa )
lowerCamelCase_ : int =get_batched_dataset(snake_case__ , args.batch_size , seed=snake_case__ )
lowerCamelCase_ : Any =0
for batch in tqdm(snake_case__ , total=snake_case__ , desc=F"""Running EPOCH-{epoch}""" ):
lowerCamelCase_ : Dict =self.data_collator(snake_case__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Dict =self.train_step_fn(snake_case__ , snake_case__ , **snake_case__ )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
if i % args.logging_steps == 0:
lowerCamelCase_ : int =jax_utils.unreplicate(state.step )
lowerCamelCase_ : Any =running_loss.item() / i
lowerCamelCase_ : Tuple =self.scheduler_fn(state_step - 1 )
lowerCamelCase_ : Tuple =self.evaluate(snake_case__ , snake_case__ )
lowerCamelCase_ : Optional[Any] ={
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(snake_case__ ) )
self.logger.log(snake_case__ , commit=snake_case__ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=snake_case__ )
def UpperCAmelCase__ ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] ):
lowerCamelCase_ : Union[str, Any] =get_batched_dataset(snake_case__ , self.args.batch_size )
lowerCamelCase_ : str =len(snake_case__ ) // self.args.batch_size
lowerCamelCase_ : int =jnp.array(0 , dtype=jnp.floataa )
lowerCamelCase_ : Any =0
for batch in tqdm(snake_case__ , total=snake_case__ , desc="Evaluating ... " ):
lowerCamelCase_ : str =self.data_collator(snake_case__ )
lowerCamelCase_ : Tuple =self.val_step_fn(snake_case__ , **snake_case__ )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
return running_loss / i
def UpperCAmelCase__ ( self : List[str] , snake_case__ : str , snake_case__ : Any ):
lowerCamelCase_ : int =jax_utils.unreplicate(snake_case__ )
print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=" ... " )
self.model_save_fn(snake_case__ , params=state.params )
with open(os.path.join(snake_case__ , "opt_state.msgpack" ) , "wb" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(snake_case__ , "args.joblib" ) )
joblib.dump(self.data_collator , os.path.join(snake_case__ , "data_collator.joblib" ) )
with open(os.path.join(snake_case__ , "training_state.json" ) , "w" ) as f:
json.dump({"step": state.step.item()} , snake_case__ )
print("DONE" )
def _snake_case ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str ) -> Dict:
print(F"""RESTORING CHECKPOINT FROM {save_dir}""" , end=" ... " )
with open(os.path.join(lowerCamelCase__ , "flax_model.msgpack" ) , "rb" ) as f:
lowerCamelCase_ : List[Any] =from_bytes(state.params , f.read() )
with open(os.path.join(lowerCamelCase__ , "opt_state.msgpack" ) , "rb" ) as f:
lowerCamelCase_ : Tuple =from_bytes(state.opt_state , f.read() )
lowerCamelCase_ : Union[str, Any] =joblib.load(os.path.join(lowerCamelCase__ , "args.joblib" ) )
lowerCamelCase_ : str =joblib.load(os.path.join(lowerCamelCase__ , "data_collator.joblib" ) )
with open(os.path.join(lowerCamelCase__ , "training_state.json" ) , "r" ) as f:
lowerCamelCase_ : Union[str, Any] =json.load(lowerCamelCase__ )
lowerCamelCase_ : str =training_state["step"]
print("DONE" )
return params, opt_state, step, args, data_collator
def _snake_case ( lowerCamelCase__ : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] ) -> str:
lowerCamelCase_ : Tuple =num_train_steps - warmup_steps
lowerCamelCase_ : Any =optax.linear_schedule(init_value=lowerCamelCase__ , end_value=lowerCamelCase__ , transition_steps=lowerCamelCase__ )
lowerCamelCase_ : int =optax.linear_schedule(init_value=lowerCamelCase__ , end_value=1e-7 , transition_steps=lowerCamelCase__ )
lowerCamelCase_ : Union[str, Any] =optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def _snake_case ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[str] ) -> List[str]:
def weight_decay_mask(lowerCamelCase__ : List[Any] ):
lowerCamelCase_ : Any =traverse_util.flatten_dict(lowerCamelCase__ )
lowerCamelCase_ : Tuple ={k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
return traverse_util.unflatten_dict(lowerCamelCase__ )
lowerCamelCase_ : Optional[Any] =scheduler_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : List[str] =optax.adamw(learning_rate=lowerCamelCase__ , weight_decay=lowerCamelCase__ , mask=lowerCamelCase__ )
return tx, lr
| 244 | 1 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
__A : Optional[Any] = logging.get_logger(__name__)
class lowerCamelCase( __snake_case ):
'''simple docstring'''
def __init__( self , *snake_case_ , **snake_case_ ):
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.' , snake_case_ , )
super().__init__(*snake_case_ , **snake_case_ )
| 27 |
SCREAMING_SNAKE_CASE :Dict = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
SCREAMING_SNAKE_CASE :Dict = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
SCREAMING_SNAKE_CASE :int = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
SCREAMING_SNAKE_CASE :Optional[Any] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
SCREAMING_SNAKE_CASE :int = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
SCREAMING_SNAKE_CASE :Optional[int] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
SCREAMING_SNAKE_CASE :Dict = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
SCREAMING_SNAKE_CASE :int = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 628 | 0 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [2_55, 2_55, 2_55] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
    img = imread('''image_data/lena.jpg''', 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow('''negative of original image''', img)
waitKey(0)
destroyAllWindows()
| 719 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 144 | 0 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['text', 'image', 'audio']


def create_inputs(input_types: List[str]):
    """simple docstring"""
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append('''Text input''')
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''')) / '''000000039769.png''').resize((512, 512)))
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f'Invalid type requested: {input_type}')
    return inputs


def output_types(outputs: List):
    """simple docstring"""
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append('''text''')
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append('''image''')
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append('''audio''')
        else:
            raise ValueError(f'Invalid output: {output}')
    return output_types
@is_tool_test
class snake_case_ :
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
self.assertTrue(hasattr(self.tool , '''inputs'''))
self.assertTrue(hasattr(self.tool , '''outputs'''))
UpperCamelCase = self.tool.inputs
for _input in inputs:
            if isinstance(_input , list):
for __input in _input:
self.assertTrue(__input in authorized_types)
else:
self.assertTrue(_input in authorized_types)
UpperCamelCase = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = create_inputs(self.tool.inputs)
UpperCamelCase = self.tool(*lowerCamelCase_)
# There is a single output
if len(self.tool.outputs) == 1:
UpperCamelCase = [outputs]
self.assertListEqual(output_types(lowerCamelCase_) , self.tool.outputs)
def UpperCAmelCase__ ( self) -> str:
self.assertTrue(hasattr(self.tool , '''description'''))
self.assertTrue(hasattr(self.tool , '''default_checkpoint'''))
self.assertTrue(self.tool.description.startswith('''This is a tool that'''))
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = create_inputs(self.tool.inputs)
UpperCamelCase = self.tool(*lowerCamelCase_)
        if not isinstance(outputs , list):
UpperCamelCase = [outputs]
self.assertEqual(len(lowerCamelCase_) , len(self.tool.outputs))
for output, output_type in zip(lowerCamelCase_ , self.tool.outputs):
UpperCamelCase = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCamelCase_ , lowerCamelCase_))
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = create_inputs(self.tool.inputs)
UpperCamelCase = []
for _input, input_type in zip(lowerCamelCase_ , self.tool.inputs):
            if isinstance(input_type , list):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
UpperCamelCase = self.tool(*lowerCamelCase_)
        if not isinstance(outputs , list):
UpperCamelCase = [outputs]
self.assertEqual(len(lowerCamelCase_) , len(self.tool.outputs)) | 34 | import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase=None ):
__a = None
if token is not None:
__a = {'Accept': 'application/vnd.github+json', 'Authorization': f'''Bearer {token}'''}
__a = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
__a = requests.get(__lowerCamelCase , headers=__lowerCamelCase ).json()
__a = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
__a = math.ceil((result['total_count'] - 100) / 100 )
for i in range(__lowerCamelCase ):
__a = requests.get(url + f'''&page={i + 2}''' , headers=__lowerCamelCase ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
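
# GitHub's API paginates at 100 results per page, hence the extra
# `&page={i + 2}` requests above; the same pattern repeats for the artifact
# listing below.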
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase=None ):
__a = None
if token is not None:
__a = {'Accept': 'application/vnd.github+json', 'Authorization': f'''Bearer {token}'''}
    __a = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
__a = requests.get(__lowerCamelCase , headers=__lowerCamelCase ).json()
__a = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
__a = math.ceil((result['total_count'] - 100) / 100 )
for i in range(__lowerCamelCase ):
__a = requests.get(url + f'''&page={i + 2}''' , headers=__lowerCamelCase ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__a = None
if token is not None:
__a = {'Accept': 'application/vnd.github+json', 'Authorization': f'''Bearer {token}'''}
__a = requests.get(__lowerCamelCase , headers=__lowerCamelCase , allow_redirects=__lowerCamelCase )
__a = result.headers['Location']
__a = requests.get(__lowerCamelCase , allow_redirects=__lowerCamelCase )
__a = os.path.join(__lowerCamelCase , f'''{artifact_name}.zip''' )
with open(__lowerCamelCase , 'wb' ) as fp:
fp.write(response.content )
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase=None ):
__a = []
__a = []
__a = None
with zipfile.ZipFile(__lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(__lowerCamelCase ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__lowerCamelCase ) as f:
for line in f:
__a = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
__a = line[: line.index(': ' )]
__a = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
__a = line[len('FAILED ' ) :]
failed_tests.append(__lowerCamelCase )
elif filename == "job_name.txt":
__a = line
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(__lowerCamelCase )} for `errors` '''
f'''and {len(__lowerCamelCase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
' problem.' )
__a = None
if job_name and job_links:
__a = job_links.get(__lowerCamelCase , __lowerCamelCase )
# A list with elements of the form (line of error, error, failed test)
__a = [x + [y] + [job_link] for x, y in zip(__lowerCamelCase , __lowerCamelCase )]
return result
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase=None ):
__a = []
__a = [os.path.join(__lowerCamelCase , __lowerCamelCase ) for p in os.listdir(__lowerCamelCase ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__lowerCamelCase , job_links=__lowerCamelCase ) )
return errors
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase=None ):
__a = Counter()
counter.update([x[1] for x in logs] )
__a = counter.most_common()
__a = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
__a = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
    __a = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def lowerCAmelCase( __lowerCamelCase ):
__a = test.split('::' )[0]
if test.startswith('tests/models/' ):
__a = test.split('/' )[2]
else:
__a = None
return test
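
# e.g. "tests/models/albert/test_modeling_albert.py::AlbertModelTest::test_foo"
# (a hypothetical test id) maps to the model name "albert"; tests outside
# `tests/models/` map to None.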
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase=None ):
__a = [(x[0], x[1], get_model(x[2] )) for x in logs]
__a = [x for x in logs if x[2] is not None]
__a = {x[2] for x in logs}
__a = {}
for test in tests:
__a = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
__a = counter.most_common()
__a = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
__a = sum(error_counts.values() )
if n_errors > 0:
__a = {'count': n_errors, 'errors': error_counts}
    __a = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def lowerCAmelCase( __lowerCamelCase ):
__a = '| no. | error | status |'
__a = '|-:|:-|:-|'
__a = [header, sep]
for error in reduced_by_error:
__a = reduced_by_error[error]['count']
__a = f'''| {count} | {error[:100]} | |'''
lines.append(__lowerCamelCase )
return "\n".join(__lowerCamelCase )
def lowerCAmelCase( __lowerCamelCase ):
__a = '| model | no. of errors | major error | count |'
__a = '|-:|-:|-:|-:|'
__a = [header, sep]
for model in reduced_by_model:
__a = reduced_by_model[model]['count']
__a , __a = list(reduced_by_model[model]['errors'].items() )[0]
__a = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(__lowerCamelCase )
return "\n".join(__lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
lowerCamelCase_ : List[str] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCamelCase_ : Any = get_job_links(args.workflow_run_id, token=args.token)
lowerCamelCase_ : Any = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCamelCase_ : int = k.find(""" / """)
lowerCamelCase_ : str = k[index + len(""" / """) :]
lowerCamelCase_ : Union[str, Any] = v
with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCamelCase_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCamelCase_ : Any = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCamelCase_ : Dict = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCamelCase_ : int = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCamelCase_ : Optional[int] = reduce_by_error(errors)
lowerCamelCase_ : Optional[int] = reduce_by_model(errors)
lowerCamelCase_ : Any = make_github_table(reduced_by_error)
lowerCamelCase_ : List[str] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, """reduced_by_error.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, """reduced_by_model.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
| 559 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : Tuple = """gpt_neox"""
def __init__( self , UpperCAmelCase__=50_432 , UpperCAmelCase__=6_144 , UpperCAmelCase__=44 , UpperCAmelCase__=64 , UpperCAmelCase__=24_576 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.25 , UpperCAmelCase__=10_000 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.1 , UpperCAmelCase__=2_048 , UpperCAmelCase__=0.02 , UpperCAmelCase__=1e-5 , UpperCAmelCase__=True , UpperCAmelCase__=0 , UpperCAmelCase__=2 , UpperCAmelCase__=False , UpperCAmelCase__=True , UpperCAmelCase__=None , **UpperCAmelCase__ , ):
super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
A__ = vocab_size
A__ = max_position_embeddings
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = rotary_pct
A__ = rotary_emb_base
A__ = attention_dropout
A__ = hidden_dropout
A__ = classifier_dropout
A__ = initializer_range
A__ = layer_norm_eps
A__ = use_cache
A__ = tie_word_embeddings
A__ = use_parallel_residual
A__ = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
def __A ( self ):
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F"""got {self.rope_scaling}""" )
        A__ = self.rope_scaling.get("type" , None )
        A__ = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
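
# A minimal usage sketch (hypothetical values): a config built with
#   rope_scaling={"type": "linear", "factor": 2.0}
# passes the validation above and is intended to stretch the rotary
# embeddings' usable context window by that factor.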
| 232 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase_ : Optional[int] = 16
UpperCAmelCase_ : List[Any] = 32
def UpperCamelCase ( _A : Accelerator , _A : int = 16 )-> Dict:
"""simple docstring"""
A__ = AutoTokenizer.from_pretrained("bert-base-cased" )
A__ = load_dataset("glue" , "mrpc" )
def tokenize_function(_A : Tuple ):
# max_length=None => use the model max length (it's actually the default)
A__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_A , max_length=_A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ = datasets.map(
_A , batched=_A , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_A : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ = 16
elif accelerator.mixed_precision != "no":
A__ = 8
else:
A__ = None
return tokenizer.pad(
_A , padding="longest" , max_length=_A , pad_to_multiple_of=_A , return_tensors="pt" , )
# Instantiate dataloaders.
A__ = DataLoader(
tokenized_datasets["train"] , shuffle=_A , collate_fn=_A , batch_size=_A , drop_last=_A )
A__ = DataLoader(
tokenized_datasets["validation"] , shuffle=_A , collate_fn=_A , batch_size=_A , drop_last=(accelerator.mixed_precision == "fp8") , )
return train_dataloader, eval_dataloader
def UpperCamelCase ( _A : str , _A : List[str] )-> Union[str, Any]:
"""simple docstring"""
A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config["lr"]
A__ = int(config["num_epochs"] )
A__ = int(config["seed"] )
A__ = int(config["batch_size"] )
A__ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
A__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ = batch_size // MAX_GPU_BATCH_SIZE
A__ = MAX_GPU_BATCH_SIZE
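        # e.g. with MAX_GPU_BATCH_SIZE set to 16 above, a requested batch size
        # of 64 becomes 4 accumulation steps of 16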
set_seed(_A )
A__ , A__ = get_dataloaders(_A , _A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Instantiate optimizer
A__ = AdamW(params=model.parameters() , lr=_A )
# Instantiate scheduler
A__ = get_linear_schedule_with_warmup(
optimizer=_A , num_warmup_steps=100 , num_training_steps=(len(_A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ = accelerator.prepare(
_A , _A , _A , _A , _A )
# Now we train the model
for epoch in range(_A ):
model.train()
for step, batch in enumerate(_A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ = model(**_A )
A__ = outputs.loss
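            # scale the loss so the gradients summed over the accumulation
            # window match one large-batch step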
A__ = loss / gradient_accumulation_steps
accelerator.backward(_A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**_A )
A__ = outputs.logits.argmax(dim=-1 )
A__ , A__ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_A , references=_A , )
A__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , _A )
def UpperCamelCase ( )-> Any:
"""simple docstring"""
A__ = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        " and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
A__ = parser.parse_args()
A__ = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_A , _A )
if __name__ == "__main__":
main()
| 232 | 1 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b) -> float:
    '''simple docstring'''
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5) -> str:
    '''simple docstring'''
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 600 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : Dict = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE : Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
# fmt: on
SCREAMING_SNAKE_CASE : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE : Optional[int] = {
"do_resize": True,
"size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.5, 0.5, 0.5],
"image_std": [0.5, 0.5, 0.5],
}
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , UpperCAmelCase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def _A ( self : List[Any] , **UpperCAmelCase_ : List[str] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def _A ( self : Union[str, Any] , **UpperCAmelCase_ : Any ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def _A ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE : Union[str, Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : int = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE : Any = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
SCREAMING_SNAKE_CASE : str = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : int = image_processor(UpperCAmelCase_ , return_tensors="np" )
SCREAMING_SNAKE_CASE : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = "lower newer"
SCREAMING_SNAKE_CASE : Dict = processor(text=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : int = self.get_image_processor()
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = "lower newer"
SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(UpperCAmelCase_ ):
processor()
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Tuple = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE : Optional[Any] = processor.batch_decode(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = "lower newer"
SCREAMING_SNAKE_CASE : Any = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Dict = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 62 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def snake_case_ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case_ ( self):
A__ = 1
A__ = 3
A__ = (3_2, 3_2)
A__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(A_)
return image
@property
def snake_case_ ( self):
torch.manual_seed(0)
A__ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def snake_case_ ( self):
torch.manual_seed(0)
A__ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def snake_case_ ( self):
torch.manual_seed(0)
A__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(A_)
@property
def snake_case_ ( self):
def extract(*a__ , **a__):
class _UpperCAmelCase :
def __init__( self):
A__ = torch.ones([0])
def snake_case_ ( self , a__):
self.pixel_values.to(A_)
return self
return Out()
return extract
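
    # `extract` above is a stand-in for the CLIP feature extractor: it returns
    # an object with an empty `pixel_values` tensor, so the pipeline's
    # feature-extraction step can be exercised without real CLIP weights.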
def snake_case_ ( self):
A__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A__ = self.dummy_cond_unet
A__ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=A_ , set_alpha_to_one=A_ , )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
# make sure here that pndm scheduler skips prk
A__ = StableDiffusionPipeline(
unet=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , safety_checker=A_ , feature_extractor=self.dummy_extractor , )
A__ = sd_pipe.to(A_)
sd_pipe.set_progress_bar_config(disable=A_)
A__ = '''A painting of a squirrel eating a burger'''
A__ = torch.Generator(device=A_).manual_seed(0)
A__ = sd_pipe([prompt] , generator=A_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''')
A__ = output.images
A__ = torch.Generator(device=A_).manual_seed(0)
A__ = sd_pipe(
[prompt] , generator=A_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=A_ , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
A__ = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def snake_case_ ( self):
A__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=A_)
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
# make sure here that pndm scheduler skips prk
A__ = StableDiffusionPipeline(
unet=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , safety_checker=A_ , feature_extractor=self.dummy_extractor , )
A__ = sd_pipe.to(A_)
sd_pipe.set_progress_bar_config(disable=A_)
A__ = '''A painting of a squirrel eating a burger'''
A__ = torch.Generator(device=A_).manual_seed(0)
A__ = sd_pipe([prompt] , generator=A_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''')
A__ = output.images
A__ = torch.Generator(device=A_).manual_seed(0)
A__ = sd_pipe(
[prompt] , generator=A_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=A_ , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
A__ = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # Check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # Sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # Put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        # Clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # Without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # With safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        # Without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # With safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        # Without safety guidance the safety checker blacks out the image
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        # With strong safety guidance the output passes the safety checker
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
| 700 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
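

# Minimal usage sketch (illustrative; assumes the two classes above carry their
# canonical transformers names and the standard OnnxConfig constructor):
if __name__ == "__main__":
    config = BertConfig()  # bert-base-sized hyperparameters by default
    onnx_config = BertOnnxConfig(config, task="default")
    # Dynamic axes for ONNX export: the batch (and choice/sequence) dimensions vary.
    print(onnx_config.inputs)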
| 526 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
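
# Minimal self-contained sketch of the lazy-import idea used above (illustrative;
# a simplification, not the actual transformers._LazyModule implementation):
#
#     import importlib
#     import types
#
#     class TinyLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._attr_to_module = {
#                 attr: mod for mod, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):  # runs only on first attribute access
#             module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#             return getattr(module, attr)
#
# Accessing e.g. ``ResNetModel`` on the package therefore triggers the heavy,
# torch-backed submodule import only at that moment.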
| 676 |
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """
    Interleave the characters of two strings; the tail of the longer string is
    appended once the shorter one is exhausted.

    >>> alternative_string_arrange("AB", "XYZ")
    'AXBYZ'
    >>> alternative_string_arrange("ABCD", "XY")
    'AXBYCD'
    """
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
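    # One more worked example (illustrative): with uneven lengths the tail of the
    # longer string is appended once the shorter one runs out.
    print(alternative_string_arrange("ABCD", "XY"), end=" ")  # -> AXBYCD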
| 676 | 1 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write an accelerate config; it will be picked up on CPU, GPU, and multi-GPU runs.
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
'''simple docstring'''
a__ : List[Any] = self.get_auto_remove_tmp_dir()
a__ : Tuple = F"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
a__ : Dict = get_results(a_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(a_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(a_ , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
a__ : Tuple = self.get_auto_remove_tmp_dir()
a__ : Optional[int] = F"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
a__ : Optional[Any] = get_results(a_ )
self.assertLess(result["perplexity"] , 1_00 )
self.assertTrue(os.path.exists(os.path.join(a_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(a_ , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def UpperCAmelCase ( self : str ) -> Optional[int]:
'''simple docstring'''
a__ : Any = self.get_auto_remove_tmp_dir()
a__ : Dict = F"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
a__ : str = get_results(a_ )
self.assertLess(result["perplexity"] , 42 )
self.assertTrue(os.path.exists(os.path.join(a_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(a_ , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
a__ : Dict = 7 if get_gpu_count() > 1 else 2
a__ : Tuple = self.get_auto_remove_tmp_dir()
a__ : Union[str, Any] = F"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
a__ : str = get_results(a_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(a_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(a_ , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def UpperCAmelCase ( self : str ) -> List[str]:
'''simple docstring'''
a__ : int = self.get_auto_remove_tmp_dir()
a__ : Union[str, Any] = F"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
a__ : int = get_results(a_ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 28 )
self.assertGreaterEqual(result["eval_exact"] , 28 )
self.assertTrue(os.path.exists(os.path.join(a_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(a_ , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
a__ : Union[str, Any] = self.get_auto_remove_tmp_dir()
a__ : Dict = F"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
a__ : Any = get_results(a_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(a_ , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def UpperCAmelCase ( self : List[str] ) -> Dict:
'''simple docstring'''
a__ : Optional[int] = self.get_auto_remove_tmp_dir()
a__ : str = F"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
a__ : List[Any] = get_results(a_ )
self.assertGreaterEqual(result["eval_rouge1"] , 10 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(a_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(a_ , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
a__ : str = self.get_auto_remove_tmp_dir()
a__ : List[str] = F"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
a__ : Optional[int] = get_results(a_ )
self.assertGreaterEqual(result["eval_bleu"] , 30 )
self.assertTrue(os.path.exists(os.path.join(a_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(a_ , "translation_no_trainer" ) ) )
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
'''simple docstring'''
a__ : Optional[int] = logging.StreamHandler(sys.stdout )
logger.addHandler(a_ )
a__ : Union[str, Any] = self.get_auto_remove_tmp_dir()
a__ : str = F"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs )
a__ : List[Any] = get_results(a_ )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
a__ : str = self.get_auto_remove_tmp_dir()
a__ : Optional[int] = F"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
a__ : Tuple = get_results(a_ )
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(a_ , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(a_ , "image_classification_no_trainer" ) ) ) | 251 |
"""simple docstring"""
def join(separator: str, separated: list[str]) -> str:
    """
    Concatenate the given strings with the separator between them.

    >>> join("", ["a", "b", "c", "d"])
    'abcd'
    >>> join("#", ["a", "b", "c", "d"])
    'a#b#c#d'
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod() | 251 | 1 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores
    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
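

# Minimal sketch of how these criteria are consumed during generation
# (illustrative; ``model`` and ``input_ids`` are assumed to be a causal LM and a
# tokenized prompt, respectively):
#
#     criteria = StoppingCriteriaList(
#         [MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=2.0)]
#     )
#     output_ids = model.generate(input_ids, stopping_criteria=criteria)
#
# Generation halts as soon as any criterion in the list returns True, which is
# exactly the OR semantics the tests above exercise.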
| 33 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
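

# For example, check_same_shape([torch.ones(2, 3), torch.zeros(2, 3)]) is True,
# while any mismatched entry makes it False; the Karras-scheduler test below
# relies on this to confirm that every scheduler produces equally shaped images.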
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
torch.manual_seed(0 )
snake_case__ = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_a , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=_a , only_cross_attention=_a , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
snake_case__ = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
snake_case__ = EulerDiscreteScheduler(prediction_type='''sample''' )
snake_case__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , )
snake_case__ = CLIPTextModel(_a )
snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case__ = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = '''cpu'''
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = pipe(**_a ).images
snake_case__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
snake_case__ = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
snake_case__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = 2
snake_case__ = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
snake_case__ = getattr(_a , scheduler_enum.name )
snake_case__ = scheduler_cls.from_config(pipe.scheduler.config )
snake_case__ = pipe(**_a )[0]
outputs.append(_a )
assert check_same_shape(_a )
@require_torch_gpu
@slow
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = torch.manual_seed(33 )
snake_case__ = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
snake_case__ = pipe(_a , generator=_a , output_type='''latent''' ).images
snake_case__ = upscaler(
prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = torch.manual_seed(33 )
snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
snake_case__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
snake_case__ = upscaler(
prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2
| 33 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowerCamelCase_ : int = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0 )
lowerCamelCase_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase_ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowerCamelCase_ : Dict = CLIPTextModel(UpperCamelCase_ )
lowerCamelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase_ : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ : str = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
lowerCamelCase_ : int = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCamelCase_ : str = '''.'''
lowerCamelCase_ : int = torch.manual_seed(0 )
lowerCamelCase_ : int = sag_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
lowerCamelCase_ : Dict = output.images
lowerCamelCase_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ : Tuple = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowerCamelCase_ : List[Any] = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowerCamelCase_ : Optional[Any] = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCamelCase_ : Tuple = '''.'''
lowerCamelCase_ : Tuple = torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = sag_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
lowerCamelCase_ : Optional[int] = output.images
lowerCamelCase_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ : Dict = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Any = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowerCamelCase_ : Optional[Any] = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCamelCase_ : List[str] = '''.'''
lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase_ : int = sag_pipe(
[prompt] , width=768 , height=512 , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
lowerCamelCase_ : Union[str, Any] = output.images
assert image.shape == (1, 512, 768, 3)
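

# Minimal sketch of the knob exercised above (illustrative): sag_scale=0.0
# disables self-attention guidance entirely, while moderate values (~0.5-1.0)
# trade extra compute for sharper, less blurry samples, e.g.:
#
#     output = sag_pipe(prompt, guidance_scale=7.5, sag_scale=0.75, num_inference_steps=20)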
| 418 |
'''simple docstring'''
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 418 | 1 |
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # try out the different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if 0 <= x2 < len(grid) and 0 <= y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y, x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
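
    # Illustrative extra step: print the path as movement arrows (assumes the
    # DIRECTIONS order left / down / right / up defined above).
    arrows = ["<", "v", ">", "^"]
    print("MOVES")
    for (ra, ca), (rb, cb) in zip(path, path[1:]):
        print(arrows[DIRECTIONS.index([rb - ra, cb - ca])], end=" ")
    print()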
| 209 | '''simple docstring'''
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """
    Simulate the BB84 quantum key distribution protocol and return the
    generated key as a string of 0s and 1s.
    """
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
from doctest import testmod
testmod()
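
    # Illustrative classical check (no qiskit needed): Alice and Bob pick matching
    # bases for ~50% of the qubits on average, which is why 6 * key_len qubits
    # comfortably yield a key_len-bit sifted key.
    check_rng = np.random.default_rng(0)
    a_bits = check_rng.integers(2, size=10_000)
    b_bits = check_rng.integers(2, size=10_000)
    print(f"Empirical basis-match rate: {np.mean(a_bits == b_bits):.3f}")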
| 209 | 1 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=lowerCAmelCase )
__lowerCAmelCase : Optional[int] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
__lowerCAmelCase : Dict = jax.random.PRNGKey(0 )
__lowerCAmelCase : Optional[Any] = 4
__lowerCAmelCase : Any = jax.device_count()
__lowerCAmelCase : str = num_samples * [prompt]
__lowerCAmelCase : Tuple = pipeline.prepare_inputs(lowerCAmelCase )
# shard inputs and rng
__lowerCAmelCase : Optional[int] = replicate(lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = jax.random.split(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[Any] = shard(lowerCAmelCase )
__lowerCAmelCase : Any = pipeline(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , jit=lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.151_4745 ) < 1e-3
assert np.abs(np.abs(lowerCAmelCase , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5e-1
__lowerCAmelCase : Dict = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCAmelCase ) == num_samples
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""flax""" , safety_checker=lowerCAmelCase )
__lowerCAmelCase : Tuple = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
__lowerCAmelCase : Optional[int] = jax.random.PRNGKey(0 )
__lowerCAmelCase : List[Any] = 50
__lowerCAmelCase : List[str] = jax.device_count()
__lowerCAmelCase : Optional[int] = num_samples * [prompt]
__lowerCAmelCase : List[Any] = pipeline.prepare_inputs(lowerCAmelCase )
# shard inputs and rng
__lowerCAmelCase : Tuple = replicate(lowerCAmelCase )
__lowerCAmelCase : Tuple = jax.random.split(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Any = shard(lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = pipeline(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , jit=lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0565_2401) ) < 1e-3
assert np.abs((np.abs(lowerCAmelCase , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5e-1
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowerCAmelCase : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase )
__lowerCAmelCase : List[Any] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
__lowerCAmelCase : Any = jax.random.PRNGKey(0 )
__lowerCAmelCase : Tuple = 50
__lowerCAmelCase : Any = jax.device_count()
__lowerCAmelCase : Any = num_samples * [prompt]
__lowerCAmelCase : str = pipeline.prepare_inputs(lowerCAmelCase )
# shard inputs and rng
__lowerCAmelCase : Tuple = replicate(lowerCAmelCase )
__lowerCAmelCase : List[Any] = jax.random.split(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = shard(lowerCAmelCase )
__lowerCAmelCase : Dict = pipeline(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , jit=lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1e-3
assert np.abs((np.abs(lowerCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
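# Hedged aside (not part of the original test file): a minimal sketch of the
# replicate/shard pattern used above. Parameters are replicated to every device,
# while inputs and PRNG keys are split per device so the jitted pipeline call runs
# data-parallel via pmap. The batch below is a hypothetical example input.
import jax
import jax.numpy as jnp

num_devices = jax.device_count()
rngs = jax.random.split(jax.random.PRNGKey(0), num_devices)  # one PRNG key per device
batch = jnp.zeros((num_devices * 2, 77), dtype=jnp.int32)  # hypothetical token ids
sharded = batch.reshape(num_devices, -1, *batch.shape[1:])  # this reshape is what `shard` does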
| 721 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16,
        num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=True,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
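# Hedged aside: the two checks above exercise incremental decoding. `init_cache`
# allocates key/value caches for up to `max_decoder_length` positions; the first
# `decode` call fills the cache for all but the last token, and the second call
# feeds only the final token together with its explicit position id. The logits
# for that token must match a plain full-sequence `decode` to within 1e-3.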
@require_flax
class FlaxBlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
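# Hedged illustration of shift_tokens_right(input_ids, pad_token_id=1, decoder_start_token_id=2):
# every row is shifted one position to the right and the start token is prepended,
# so [71, 82, 18, 33, 2, 1, 1] becomes [2, 71, 82, 18, 33, 2, 1] -- one pad token
# fewer per batch, which is exactly what the assertions above verify.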
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GENERATION_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KWARGS = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_ids = model.generate(**model_inputs, **FASTER_GENERATION_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_ids.sequences, **TOK_DECODE_KWARGS)
        assert generated_txt[0].strip() == tgt_text
| 218 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
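# Hedged usage sketch (the script name assumes the usual transformers conversion
# script; the paths below are placeholders, not real files):
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/mobilebert/ckpt \
#       --mobilebert_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin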
| 167 | import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 167 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
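# Hedged illustration: this rename maps torchvision-style backbone keys onto the
# HF DETR-style encoder wrapper, e.g. "backbone.0.body.layer1.0.conv1.weight"
# becomes "backbone.conv_encoder.model.layer1.0.conv1.weight".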
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
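# Hedged note on the slicing above: PyTorch's nn.MultiheadAttention stores the
# fused q/k/v projection as a single (3 * hidden_size, hidden_size) matrix. With
# hidden_size = 256 here, rows [:256] are the query projection, [256:512] the
# key projection, and [-256:] the value projection.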
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
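# Hedged worked example: for a 1200x800 detection image, current_max_size = 1200
# and target_max_size = 800, so scale = 800 / 1200 and the image is resized to
# roughly 800x533, preserving the aspect ratio.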
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
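# Hedged usage sketch (the script name is assumed; the output path is a placeholder):
#
#   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection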
| 706 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
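# Hedged aside: the whole solver rests on XOR being its own inverse --
# if c = p ^ k, then c ^ k recovers p. A two-line sanity check:
p, k = ord("a"), ord("g")
assert (p ^ k) ^ k == p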
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 152 | 0 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs
def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 103 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 304 | 0 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
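# Hedged extra example: a triangle is an odd cycle, so it is not bipartite.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
assert check_bipartite_dfs(triangle) is False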
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph)) | 266 |
'''simple docstring'''
def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole unsorted prefix
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
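# Hedged sanity check: the largest element is flipped to the front and then to the
# end of the unsorted prefix, so e.g. pancake_sort([3, 1, 2]) returns [1, 2, 3].
assert pancake_sort([3, 1, 2]) == [1, 2, 3]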
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted)) | 266 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu",
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=64, max_depth=10,
        head_in_index=-1, **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index | 91 |
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
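# Hedged usage note: open_knight_tour(5) returns a 5x5 board whose entries 1..25
# trace one valid open knight's tour; sizes such as 3 or 4 admit no open tour
# and raise ValueError instead.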
if __name__ == "__main__":
import doctest
doctest.testmod() | 91 | 1 |
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1
    return answer
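# Hedged worked example: z_function("abab") == [0, 0, 2, 0] (index 0 is left as 0
# by convention), and find_pattern("ab", "abab") == 2 because "abab" contains two
# occurrences of "ab".
assert z_function("abab") == [0, 0, 2, 0]
assert find_pattern("ab", "abab") == 2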
if __name__ == "__main__":
import doctest
doctest.testmod() | 596 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph)) | 596 | 1 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i) -> int:
        return i
class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
f"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}
__UpperCAmelCase = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__UpperCAmelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__UpperCAmelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__UpperCAmelCase = 2
__UpperCAmelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__UpperCAmelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__UpperCAmelCase = None
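For reference, compute_metrics receives a transformers.EvalPrediction, and the invariant being tested is that the arrays gathered across ranks come back in dataset order. A minimal single-process sketch of that check (the NumPy arrays are illustrative stand-ins for the gathered tensors):

import numpy as np
from transformers import EvalPrediction

preds = np.arange(7)   # stand-in for predictions gathered across ranks
labels = np.arange(7)  # stand-in for the gathered label_ids
p = EvalPrediction(predictions=preds, label_ids=labels)
assert p.predictions.tolist() == list(range(7))  # samples returned in order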
| 308 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 363 | 0 |
def solution(n: int = 100) -> int:
    """Count the distinct terms of a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    max_limit = n + 1  # maximum limit (range's upper bound is exclusive)
    for a in range(2, max_limit):
        for b in range(2, max_limit):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(input().strip())))
| 689 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse denominations largest-first (assumes the list is sorted ascending)
    for denomination in reversed(denominations):
        # Take as many of this denomination as possible
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answer" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
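A non-interactive usage example with the default denominations; note the algorithm is greedy, so it is only guaranteed optimal for canonical coin systems such as this one:

print(find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987"))
# -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]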
| 689 | 1 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__UpperCAmelCase = logging.getLogger()
__UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}

        # Create dummy train/val/test files for the `source` and `target` fields.
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir}
            --output_dir {output_dir}
            --model_name_or_path facebook/rag-sequence-base
            --model_type rag_sequence
            --do_train
            --do_predict
            --n_val -1
            --val_check_interval 1.0
            --train_batch_size 2
            --eval_batch_size 1
            --max_source_length 25
            --max_target_length 25
            --val_max_target_length 25
            --test_max_target_length 25
            --label_smoothing 0.1
            --dropout 0.1
            --attention_dropout 0.1
            --weight_decay 0.001
            --adam_epsilon 1e-08
            --max_grad_norm 0.1
            --lr_scheduler polynomial
            --learning_rate 3e-04
            --num_train_epochs 1
            --warmup_steps 4
            --gradient_accumulation_steps 1
            --distributed-port 8787
            --use_dummy_dataset 1
            --distributed_retriever {distributed_retriever}
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
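For reference, the metrics.json that _run_finetune reads back is plain JSON written by the finetuning script; a sketch of the lookup the assertions perform (the path shown is illustrative, in the tests it lives under the auto-removed tmp dir):

import json

with open("output/metrics.json") as f:  # illustrative path
    result = json.load(f)
print(result["test"][0]["test_avg_em"])  # exact-match score the tests threshold at 0.2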
| 600 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 687 | 0 |
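Outside the test suite, the integration path above corresponds to ordinary checkpoint usage; a minimal inference sketch (the checkpoint name comes from the integration test, the image path is illustrative):

import torch
from PIL import Image
from transformers import AutoImageProcessor, FocalNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")

image = Image.open("cats.png")  # illustrative path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])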
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
| 701 |
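A short usage sketch of the API above (dot product via __mul__, matrix-vector product, determinant):

v = Vector([1, 2, 3])
w = Vector([4, 5, 6])
print(v + w)                 # (5,7,9)
print(v * w)                 # 32, the dot product
print(v.euclidean_length())  # sqrt(14) ~ 3.7417

m = Matrix([[1, 2], [3, 4]], 2, 2)
print(m.determinant())       # -2
print(m * Vector([1, 1]))    # (3,7)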
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 55 | 0 |