from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" dimension.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
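
# Illustrative usage sketch (added; not part of the original module). It assumes
# the class above is exported as `transformers.IBertConfig`; `quant_mode=True`
# is an arbitrary demonstration value, not a recommended setting.
def _example_ibert_config():
    from transformers import IBertConfig

    config = IBertConfig(quant_mode=True)  # integer-only inference mode
    assert config.hidden_size == 768  # the default defined above
    return config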
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # BlenderbotSmall lowercases and re-spaces punctuation on decode
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    """Wraps a Pix2Struct image processor and a T5 tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )

            # The text targets feed the decoder, so rename the keys accordingly.
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
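
# Illustrative usage sketch (added; not part of the original module). The model
# id "google/pix2struct-base" and the blank image are demonstration values, and
# `from_pretrained` needs a network download.
def _example_pix2struct_processor():
    from PIL import Image
    from transformers import Pix2StructProcessor

    processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
    inputs = processor(images=Image.new("RGB", (256, 256)), text="A caption", return_tensors="pt")
    # For a non-VQA checkpoint the text is tokenized as decoder labels, so the
    # output holds "flattened_patches", "attention_mask" and "decoder_input_ids".
    return inputs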
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
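
# Illustrative usage sketch (added; not part of the original package file),
# showing the exported feature types combined into a dataset schema.
def _example_features():
    from datasets import ClassLabel, Features, Value

    return Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})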
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        # Mean per-token cross-entropy, rescaled by sequence length into a total
        # log-likelihood comparable to the reference Mesh-TensorFlow score.
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
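
# Illustrative usage sketch (added; not part of the original module). The
# reduced `entity_vocab_size` is an arbitrary demonstration value.
def _example_luke_config():
    from transformers import LukeConfig

    config = LukeConfig(entity_vocab_size=10000)
    assert config.entity_emb_size == 256  # the default defined above
    return config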
from __future__ import annotations

from collections.abc import Iterable
from typing import Generic, TypeVar

_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    """A FIFO queue built from two LIFO stacks."""

    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Bind the methods once to avoid repeated attribute lookups in the
        # transfer loop below.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")

        return self._stack2.pop()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
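
# Illustrative usage (added). Elements enter on one stack and are reversed onto
# the other only when it runs empty, so each element moves at most twice and a
# sequence of n operations costs O(n) overall (amortized O(1) per operation).
def _example_queue_by_two_stacks():
    queue = QueueByTwoStacks([10, 20, 30])
    queue.put(40)
    assert queue.get() == 10  # FIFO order despite the LIFO internals
    assert len(queue) == 3
    return queue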
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Mapping from fairseq/unilm parameter names to the HuggingFace WavLM names.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the attribute path ("a.b.c") down to the target module/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
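# Example invocation (added; the paths are placeholders and the script's actual
# filename in the repository may differ):
#
#   python convert_wavlm_original_checkpoint.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted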
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
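
# Illustrative usage sketch (added; not part of the original test file). The
# model id "naver-clova-ix/donut-base" is a demonstration value and requires a
# network download.
def _example_donut_image_processor():
    from PIL import Image
    from transformers import DonutImageProcessor

    image_processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")
    pixel_values = image_processor(Image.new("RGB", (300, 200)), return_tensors="pt").pixel_values
    return pixel_values  # (1, 3, height, width), sized per the checkpoint config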
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFTransfoXLForSequenceClassification,
        TFTransfoXLLMHeadModel,
        TFTransfoXLModel,
    )


class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # The SentencePiece processor is not picklable; it is re-loaded in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize a string with SentencePiece, splitting digit/comma pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        spaces_between_special_tokens=True,
        **kwargs,
    ):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
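
# Illustrative usage sketch (added; not part of the original module). The model
# id is a demonstration value and requires a network download.
def _example_xlnet_tokenizer():
    from transformers import XLNetTokenizer

    tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    encoded = tokenizer("Hello world")
    # XLNet appends <sep> and <cls> at the *end* of the sequence (see
    # build_inputs_with_special_tokens above) and pads on the left.
    return encoded["input_ids"]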
import argparse
import os
import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def str2bool(v):
    """Parse a boolean-ish command-line string (argparse has no built-in bool type)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    # The original attention uses 1x1 convolutions; squeeze the trailing
    # spatial dims to obtain linear-layer weights.
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = strabool(args.class_cond)
lowerCAmelCase__ = os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowerCAmelCase__ = TEST_UNET_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
lowerCAmelCase__ = None
lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config)
lowerCAmelCase__ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowerCAmelCase__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config)
lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 648 | 1 |
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
a__ : List[Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
a__ : List[str] = 'main'
# Default branch name
a__ : Dict = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
a__ : str = 'aaaaaaa'
# This commit does not exist, so we should 404.
a__ : Dict = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
a__ : Optional[Any] = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def __snake_case ( ) -> Tuple:
"""simple docstring"""
print('''Welcome!''' )
yield
print('''Bye!''' )
@contextlib.contextmanager
def __snake_case ( ) -> Dict:
"""simple docstring"""
print('''Bonjour!''' )
yield
print('''Au revoir!''' )
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : str ):
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('''transformers''' ) is not None
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def __snake_case ( self : Optional[Any] , a__ : Optional[int] ):
with ContextManagers([] ):
print('''Transformers are awesome!''' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def __snake_case ( self : Tuple , a__ : List[str] ):
with ContextManagers([context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def __snake_case ( self : List[Any] , a__ : List[str] ):
with ContextManagers([context_fr(), context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' )
@require_torch
def __snake_case ( self : Any ):
self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels'''] )
self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(_UpperCamelCase ) , ['''start_positions''', '''end_positions'''] )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
pass
self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels'''] )
@require_tf
def __snake_case ( self : str ):
self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels'''] )
self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(_UpperCamelCase ) , ['''start_positions''', '''end_positions'''] )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
pass
self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels'''] )
@require_flax
def __snake_case ( self : Union[str, Any] ):
# Flax models don't have labels
self.assertEqual(find_labels(_UpperCamelCase ) , [] )
self.assertEqual(find_labels(_UpperCamelCase ) , [] )
self.assertEqual(find_labels(_UpperCamelCase ) , [] )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
pass
self.assertEqual(find_labels(_UpperCamelCase ) , [] )
| 51 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__( self , parent , batch_size=13 , patch_size=2 , max_length=24 , num_mel_bins=16 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
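        # worked example with the defaults above (a sketch, not part of the original file):
        #   frequency_out_dimension = (16 - 2) // 2 + 1 = 8
        #   time_out_dimension = (24 - 2) // 2 + 1 = 12
        #   num_patches = 8 * 12 = 96, so seq_length = 96 + 2 = 98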
    def prepare_config_and_inputs( self ):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        config = self.get_config()

        return config, input_values, labels

    def get_config( self ):
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )

    def create_and_check_model( self , config , input_values , labels ):
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_values , labels = config_and_inputs
        inputs_dict = {'input_values': input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp( self ):
        self.model_tester = ASTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='AST does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass

    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['input_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_audio():
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' )
    audio , sampling_rate = torchaudio.load(filepath )

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest ( unittest.TestCase ):
    @cached_property
    def default_feature_extractor( self ):
        return (
            ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
            if is_torchaudio_available()
            else None
        )
    @slow
    def test_inference_audio_classification( self ):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(torch_device )

        feature_extractor = self.default_feature_extractor
        audio , sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors='pt' ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 292 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        config = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxRobertaModelTester(self )

    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 197 | """simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__lowerCAmelCase : List[Any] ="""\
Text data.
Second line of data."""
__lowerCAmelCase : Any ="""file"""
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> Union[str, Any]:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
lowercase = bytes(lowerCAmelCase__ , """utf-8""" )
with zstd.open(lowerCAmelCase__ , """wb""" ) as f:
f.write(lowerCAmelCase__ )
return path
@pytest.fixture
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple ) -> Dict:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , lowerCAmelCase__ ) , """w""" ) as f:
f.write(lowerCAmelCase__ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> Union[str, Any]:
'''simple docstring'''
lowercase = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
lowercase = input_paths[compression_format]
lowercase = tmp_path / """cache"""
lowercase = DownloadConfig(cache_dir=lowerCAmelCase__ , extract_compressed_file=lowerCAmelCase__ )
lowercase = cached_path(lowerCAmelCase__ , download_config=lowerCAmelCase__ )
with open(lowerCAmelCase__ ) as f:
lowercase = f.read()
with open(lowerCAmelCase__ ) as f:
lowercase = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] ) -> Any:
'''simple docstring'''
lowercase = """custom_cache"""
lowercase = """custom_extracted_dir"""
lowercase = tmp_path / """custom_extracted_path"""
if default_extracted:
lowercase = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , lowerCAmelCase__ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(lowerCAmelCase__ ) )
lowercase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
lowercase = xz_file
lowercase = (
DownloadConfig(extract_compressed_file=lowerCAmelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowerCAmelCase__ )
)
lowercase = cached_path(lowerCAmelCase__ , download_config=lowerCAmelCase__ )
assert Path(lowerCAmelCase__ ).parent.parts[-2:] == expected
def test_cached_path_local(text_file ):
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file


def test_cached_path_missing_local(tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )


def test_get_from_cache_fsspec(tmpfs_file ):
    output_file = get_from_cache(f'tmp://{tmpfs_file}' )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowerCAmelCase__ )
def UpperCAmelCase__ ( ) -> str:
'''simple docstring'''
with pytest.raises(lowerCAmelCase__ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple ) -> Any:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(lowerCAmelCase__ ):
http_get("""https://huggingface.co""" , temp_file=lowerCAmelCase__ )
with pytest.raises(lowerCAmelCase__ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> Any:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(lowerCAmelCase__ ):
ftp_get("""ftp://huggingface.co""" , temp_file=lowerCAmelCase__ )
with pytest.raises(lowerCAmelCase__ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(lowerCAmelCase__ ):
fsspec_get("""s3://huggingface.co""" , temp_file=lowerCAmelCase__ )
with pytest.raises(lowerCAmelCase__ ):
fsspec_head("""s3://huggingface.co""" )
| 197 | 1 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests ( FlaxModelTesterMixin , unittest.TestCase ):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input( self ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )

        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 145 |
def mf_knapsack(i: int, wt: list, val: list, j: int):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]
def knapsack(w: int, wt: list, val: list, n: int):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp
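# dp[i][w_] holds the best value achievable with the first i items and capacity w_;
# for the example in __main__ below, knapsack(6, [4, 3, 2, 3], [3, 2, 4, 4], 4)[0] == 8
# (items 3 and 4: weights 2 + 3 <= 6, values 4 + 4)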
def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples' )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            f'But got {num_items} weights and {len(val)} values'
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                'All weights must be integers but got weight of '
                f'type {type(wt[i])} at index {i}'
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = [3, 2, 4, 4]
_SCREAMING_SNAKE_CASE : Any = [4, 3, 2, 3]
_SCREAMING_SNAKE_CASE : int = 4
_SCREAMING_SNAKE_CASE : int = 6
_SCREAMING_SNAKE_CASE : Optional[int] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE : Dict = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE : Any = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 344 | 0 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is set."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box , width , height ):
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
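# e.g., assuming a 1000x2000 (width x height) page, the pixel box [10, 20, 110, 220]
# normalizes to [10, 10, 110, 110] on the 0-1000 scale expected by LayoutLM-style models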
def apply_tesseract(image: np.ndarray , lang: Optional[str] , tesseract_config: Optional[str] ):
    # apply OCR
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='dict' , config=tesseract_config )
    words , left , top , width , height = data['text'], data['left'], data['top'], data['width'], data['height']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )

    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
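# the two lists returned above are parallel: words[i] is the OCR'd token whose
# 0-1000-normalized bounding box is normalized_boxes[i]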
class LayoutLMv3ImageProcessor ( BaseImageProcessor ):
    model_input_names = ['pixel_values']

    def __init__( self , do_resize=True , size=None , resample=PILImageResampling.BILINEAR , do_rescale=True , rescale_value=1 / 255 , do_normalize=True , image_mean=None , image_std=None , apply_ocr=True , ocr_lang=None , tesseract_config="" , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size )

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize( self , image , size , resample=PILImageResampling.BILINEAR , data_format=None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
        output_size = (size['height'], size['width'])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def rescale( self , image , scale , data_format=None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format=None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize=None , size=None , resample=None , do_rescale=None , rescale_factor=None , do_normalize=None , image_mean=None , image_std=None , apply_ocr=None , ocr_lang=None , tesseract_config=None , return_tensors=None , data_format=ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images )

        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )

        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , 'pytesseract' )
            words_batch = []
            boxes_batch = []
            for image in images:
                words , boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )

        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]

        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]

        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]

        images = [to_channel_dimension_format(image , data_format ) for image in images]

        data = BatchFeature(data={'pixel_values': images} , tensor_type=return_tensors )

        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
return data
| 8 | 0 |
import argparse
import os
import re
import packaging.version
SCREAMING_SNAKE_CASE :Optional[Any] = """examples/"""
SCREAMING_SNAKE_CASE :Dict = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
SCREAMING_SNAKE_CASE :int = {
"""init""": """src/diffusers/__init__.py""",
"""setup""": """setup.py""",
}
SCREAMING_SNAKE_CASE :Optional[int] = """README.md"""
def update_version_in_file(fname , version , pattern ):
    """Update the version in one file, using the regex for that file's pattern."""
    with open(fname , "r" , encoding="utf-8" , newline="\n" ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION" , version )
    code = re_pattern.sub(replace , code )
    with open(fname , "w" , encoding="utf-8" , newline="\n" ) as f:
        f.write(code )
def update_version_in_examples(version ):
    for folder , directories , fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects" )
        if "legacy" in directories:
            directories.remove("legacy" )
        for fname in fnames:
            if fname.endswith(".py" ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern="examples" )
def global_version_update(version , patch=False ):
    for pattern , fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith("1." ):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc" , "https://huggingface.co/docs/diffusers/model_doc" , )
        index += 1

    with open(README_FILE , "w" , encoding="utf-8" , newline="\n" ) as f:
        f.writelines(lines )
def get_version():
    with open(REPLACE_FILES["init"] , "r" ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work(patch=False ):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]" )
    if len(version ) == 0:
        version = default_version

    print(f"Updating version to {version}." )
    global_version_update(version , patch=patch )
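# e.g., under the rules above a dev version "0.19.0.dev0" is released as "0.19.0",
# a patch on "0.18.2" proposes "0.18.3", and a regular release on "0.18.2" proposes "0.19.0"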
def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]" )
    if len(version ) == 0:
        version = dev_version

    print(f"Updating version to {version}." )
    global_version_update(version )
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 628 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)


class Mask2FormerConfig ( PretrainedConfig ):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
    def __init__( self , backbone_config=None , feature_size=256 , mask_feature_size=256 , hidden_dim=256 , encoder_feedforward_dim=1_024 , activation_function="relu" , encoder_layers=6 , decoder_layers=10 , num_attention_heads=8 , dropout=0.0 , dim_feedforward=2_048 , pre_norm=False , enforce_input_projection=False , common_stride=4 , ignore_value=255 , num_queries=100 , no_object_weight=0.1 , class_weight=2.0 , mask_weight=5.0 , dice_weight=5.0 , train_num_points=12_544 , oversample_ratio=3.0 , importance_sample_ratio=0.75 , init_std=0.02 , init_xavier_std=1.0 , use_auxiliary_loss=True , feature_strides=[4, 8, 16, 32] , output_auxiliary_logits=None , **kwargs , ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=["stage1", "stage2", "stage3", "stage4"] , )

        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported )}" )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs )
    @classmethod
    def from_backbone_config( cls , backbone_config , **kwargs ):
        return cls(
            backbone_config=backbone_config , **kwargs , )

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 628 | 1 |
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(value: list , weight: list , capacity: float ):
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )

    max_value: float = 0
    fractions: list = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
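    # minimal usage sketch with classic illustrative numbers (not part of the original module):
    # taking all of items 0 and 1 plus two thirds of item 2 yields the optimum
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    print(max_value, fractions)  # 240.0 [1, 1, 0.6666666666666666]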
| 715 | """simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest ( unittest.TestCase ):
    @slow
    def test_bert_from_pretrained( self ):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config, BertConfig )

                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model, FlaxBertModel )

    @slow
    def test_roberta_from_pretrained( self ):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config, RobertaConfig )

                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model, FlaxRobertaModel )
    @slow
    def test_bert_jax_jit( self ):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxBertModel.from_pretrained(model_name )
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )

            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )

            eval(**tokens ).block_until_ready()
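            # jax.jit traces and compiles `eval` on its first call; block_until_ready()
            # forces JAX's asynchronous dispatch to finish, so the jitted forward pass
            # actually runs to completion inside the test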
    @slow
    def test_roberta_jax_jit( self ):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxRobertaModel.from_pretrained(model_name )
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )

            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )

            eval(**tokens ).block_until_ready()
    def test_repo_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier" ):
            model = FlaxAutoModel.from_pretrained("bert-base" )

    def test_revision_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            model = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa" )

    def test_model_file_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ):
            model = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )

    def test_model_from_pt_suggestion( self ):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model" ):
            model = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 668 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig ( PretrainedConfig ):
    model_type = "data2vec-vision"

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
| 325 |
def lowerCAmelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
return " ".join(
''''''.join(word[::-1] ) if len(UpperCAmelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
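    # e.g., words longer than four characters are reversed in place:
    # reverse_long_words('Hey wollef sroirraw') -> 'Hey fellow warriors'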
| 154 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
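# e.g., printable byte 65 ("A") maps to "A" itself, while byte 32 (space) maps to
# "Ġ" (chr(256 + 32)), which is why GPT2/Blenderbot BPE vocabularies contain "Ġ"-prefixed tokens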
def get_pairs(word ):
    """Return the set of symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
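# e.g., get_pairs(("l", "o", "w")) -> {("l", "o"), ("o", "w")}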
class BlenderbotTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token

        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )

        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8' ) as merges_handle:
            bpe_merges = merges_handle.read().split('\n' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """Apply the ranked byte-pair merges to a single pre-tokenized token, with caching."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string into byte-level BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens back into a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation"):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
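

# Hedged usage sketch (the `if __name__` guard and sample strings below are illustrative,
# not part of the original file). It exercises the two module-level BPE helpers above:
# `bytes_to_unicode` builds a reversible 256-entry byte <-> unicode table, and
# `get_pairs` lists the adjacent symbol pairs that BPE ranks for merging.
if __name__ == "__main__":
    byte_encoder = bytes_to_unicode()
    assert len(byte_encoder) == 256 and len(set(byte_encoder.values())) == 256
    encoded = "".join(byte_encoder[b] for b in "héllo".encode("utf-8"))
    print(encoded)  # 'hÃ©llo' -- every input byte maps to exactly one printable character
    print(get_pairs(tuple(encoded)))  # {('h', 'Ã'), ('Ã', '©'), ('©', 'l'), ('l', 'l'), ('l', 'o')}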
| 721 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=50_265 , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , num_labels=3 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed.")
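

# Hedged usage sketch (illustrative, not from the original file). It shows how the
# `attribute_map` declared above lets generic code read `hidden_size` and
# `num_attention_heads` while the config actually stores `d_model` and
# `encoder_attention_heads`.
if __name__ == "__main__":
    config = BartConfig(encoder_layers=2, decoder_layers=2, d_model=128)
    assert config.hidden_size == 128  # resolved through attribute_map
    assert config.num_attention_heads == config.encoder_attention_heads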
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ])
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ))
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t)
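

# Hedged usage sketch (illustrative, not from the original file). It prints the
# dynamic ONNX input axes declared by the `inputs` property above; with `use_past`
# enabled (e.g. via `BartOnnxConfig.with_past`), past_key_values.* axes are added too.
if __name__ == "__main__":
    onnx_config = BartOnnxConfig(BartConfig(), task="seq2seq-lm")
    for name, axes in onnx_config.inputs.items():
        print(name, axes)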
| 502 | 0 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a serialized Flax checkpoint from `model_file` into `pt_model`."""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned.")
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions.")
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model.")
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state)
    pt_model.base_model_prefix = ""
    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )
        flax_key = ".".join(flax_key_tuple_array)
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.")
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)
    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model).")
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference.")
    return pt_model
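

# Hedged shape sketch (illustrative, not from the original file). It demonstrates
# the kernel -> weight layout conventions applied above: 4D Flax conv kernels are
# transposed HWIO -> OIHW for PyTorch, and 2D dense kernels are simply transposed.
if __name__ == "__main__":
    conv_kernel = np.zeros((3, 3, 8, 16))  # flax conv kernel: (H, W, in, out)
    print(np.transpose(conv_kernel, (3, 2, 0, 1)).shape)  # torch conv weight: (16, 8, 3, 3)
    dense_kernel = np.zeros((8, 16))  # flax dense kernel: (in, out)
    print(dense_kernel.T.shape)  # torch linear weight: (16, 8)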
| 55 |
import os
def solution():
    """
    Find the maximum total from top to bottom of the triangle in triangle.txt by
    adding the best of the two parents to each entry, row by row (Project Euler 18/67).
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
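

# Hedged sketch (illustrative, not from the original file): the same bottom-up DP
# on an inline triangle, so the algorithm can be checked without `triangle.txt`.
if __name__ == "__main__":
    demo = [[3], [7, 4], [2, 4, 6]]
    for i in range(1, len(demo)):
        for j in range(len(demo[i])):
            demo[i][j] += max(
                demo[i - 1][j] if j != len(demo[i - 1]) else 0,
                demo[i - 1][j - 1] if j > 0 else 0,
            )
    print(max(demo[-1]))  # 14, via the path 3 -> 7 -> 4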
| 55 | 1 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch")
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
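

# Hedged sketch (illustrative, not from the original file). The detection above keys
# off two SageMaker environment variables; this emulates a model-parallel launch
# locally (the final check still requires the `smdistributed` package to be installed).
if __name__ == "__main__":
    os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
    os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
    print(is_sagemaker_model_parallel_available())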
| 706 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 451 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2, length=length)
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3)
        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)
        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
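

# Hedged sketch (illustrative, not from the original test file; requires Flax). It
# composes the same warpers outside unittest: top-k keeps the 2 largest logits (the
# rest become -inf), and temperature rescales the survivors before sampling.
if __name__ == "__main__":
    processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(2)])
    scores = jnp.log(jnp.array([[0.1, 0.2, 0.3, 0.4]]))
    print(processors(None, scores, cur_len=1))  # only the two largest entries stay finite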
| 50 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """andreasmadsen/efficient_mlm_m0.40""": (
        """https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = """roberta-prelayernorm"""

    def __init__( self , vocab_size=50_265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
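

# Hedged usage sketch (illustrative, not from the original file): the ONNX config
# above exposes a 3-axis dynamic shape for multiple-choice tasks.
if __name__ == "__main__":
    onnx_config = RobertaPreLayerNormOnnxConfig(RobertaPreLayerNormConfig(), task="multiple-choice")
    print(dict(onnx_config.inputs))  # {'input_ids': {0: 'batch', 1: 'choice', 2: 'sequence'}, ...}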
| 706 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
    """Calculate the built-in voltage of a p-n junction diode."""
    if donor_conc <= 0:
        raise ValueError("""Donor concentration should be positive""")
    elif acceptor_conc <= 0:
        raise ValueError("""Acceptor concentration should be positive""")
    elif intrinsic_conc <= 0:
        raise ValueError("""Intrinsic concentration should be positive""")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            """Donor concentration should be greater than intrinsic concentration""")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            """Acceptor concentration should be greater than intrinsic concentration""")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
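

# Hedged sketch (illustrative, not from the original file): built-in potential of a
# symmetric silicon p-n junction at T = 300 K (concentrations in cm^-3), roughly 0.8 V.
if __name__ == "__main__":
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))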
| 162 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f'Could not make batched video from {videos}')
# Note: the class name below is inferred from the offset-style rescaling (ViViT's video
# processor); treat the name as an assumption, since the original identifier was stripped.
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["""pixel_values"""]

    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , offset: bool = True , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BILINEAR , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size['shortest_edge'], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size['height'], size['width'])
        else:
            raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}')
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale( self , image: np.ndarray , scale: Union[int, float] , offset: bool = True , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image( self , image: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: bool = None , rescale_factor: float = None , offset: bool = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , data_format: Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        if offset and not do_rescale:
            raise ValueError('For offset, do_rescale must also be set to True.')
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess( self , videos: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: bool = None , rescale_factor: float = None , offset: bool = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        if not valid_images(videos):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]
        data = {'pixel_values': videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
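

# Hedged usage sketch (illustrative, not from the original file). It preprocesses a
# single two-frame video of random uint8 frames; the output batch has shape
# (num_videos, num_frames, channels, crop_height, crop_width).
if __name__ == "__main__":
    processor = VivitImageProcessor()
    video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(2)]
    outputs = processor(video, return_tensors="np")
    print(outputs["pixel_values"].shape)  # (1, 2, 3, 224, 224)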
| 602 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class Automaton:
    """Aho-Corasick automaton for matching several keywords in one pass over a string."""

    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []})
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"])
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """
        >>> A = Automaton(["what", "hat", "ver", "er"])
        >>> A.search_in("whatever, err ... , wherever")
        {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}
        """
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
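

# Hedged usage sketch (illustrative, not from the original file): the classic
# Aho-Corasick example over the text "ahishers".
if __name__ == "__main__":
    automaton = Automaton(["he", "she", "his", "hers"])
    print(automaton.search_in("ahishers"))  # {'his': [1], 'she': [3], 'he': [4], 'hers': [4]}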
| 602 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case :Any =logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
                -dim:, :
            ]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
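# Illustrative note (not from the original script): with hidden_size = 4, a fused
# in_proj_weight of shape (12, 4) splits into query = rows 0-3, key = rows 4-7 and
# value = rows 8-11 -- exactly what the slices [:h, :], [h : 2 * h, :] and [-h:, :]
# above select.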
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original model's weights to our DETA structure.
    """

    # load config
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
__snake_case :int =argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__snake_case :Union[str, Any] =parser.parse_args()
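    # Example invocation (illustrative; the script name depends on where this
    # module is saved):
    #   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
    #       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub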
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
 | 701 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        # binarize: everything below 0.5 becomes background, everything else foreground
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
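# Minimal usage sketch for the pipeline defined below (the checkpoint id is
# illustrative -- RePaint is typically driven by an unconditional DDPM such as
# "google/ddpm-ema-celebahq-256"):
#     pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#     out = pipe(image=original_image, mask_image=mask, num_inference_steps=250).images[0]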
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample

            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
 | 224 | 0 |
def twos_complement(number: int) -> str:
    """
    Take in a non-positive integer `number` and return its two's complement
    representation as a binary string prefixed with '0b'.
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
 | 577 |
import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""


class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """
    This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently
    process a `scores` input tensor.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] performing top-p (nucleus) filtering: keep the smallest set of tokens whose
    cumulative probability reaches `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
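# Illustrative behaviour (not part of the original file): for softmax probabilities
# [0.5, 0.3, 0.15, 0.05] and top_p=0.8, the rolled cumulative mask keeps the first
# two tokens (cumulative probability 0.8); every other logit becomes `filter_value`.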
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] restricting sampling to the `top_k` highest probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
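# Illustrative behaviour: with scores [[1.0, 3.0, 2.0, 0.5]] and top_k=2, only the
# logits at indices 1 and 2 survive; all other positions are set to `filter_value`.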
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] enforcing the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] enforcing the specified token as the last generated token when `max_length` is
    reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] enforcing a minimum length by setting the EOS probability to 0."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
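# Illustrative composition of the processors above (not from the original file):
#     processors = FlaxLogitsProcessorList(
#         [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50), FlaxTopPLogitsWarper(top_p=0.9)]
#     )
#     scores = processors(input_ids, scores, cur_len)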
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens as soon as generation starts."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens at every decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores


class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] forcing specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores


class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] modifying the logits for Whisper timestamp generation."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>, which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
 | 516 | 0 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count case-insensitive occurrences of `term` in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """Compute idf = log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Combine a term frequency and an inverse document frequency."""
    return round(tf * idf, 3)
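# Worked example (illustrative):
#     term_frequency("ai", "AI is great. ai rocks")  # -> 2
#     inverse_document_frequency(1, 3)               # -> round(log10(3 / 1), 3) == 0.477
#     tf_idf(2, 0.477)                               # -> 0.954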
| 720 |
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION

if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER) but operates at the character level instead of the word level. Please refer
to the docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This
value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the
better the performance of the ASR system, with a CER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
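# Worked example (illustrative): reference "abc" vs. prediction "axc" has one
# substitution and no insertions or deletions, so CER = 1 / 3 ~= 0.333.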
| 207 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" SqueezeBERT tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
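# Minimal usage sketch (illustrative; downloading the checkpoint requires network
# access):
#     tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#     tokenizer("Hello world")["input_ids"]  # [CLS] hello world [SEP] token ids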
| 554 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
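# Illustrative query on the graphs above: bidirectional_dij("E", "F", graph_fwd, graph_bwd)
# returns 3, since E -> G -> F costs 2 + 1 = 3 while E -> B -> C -> D -> F costs 4.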
| 527 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""
    Constructs a CLIP image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
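# Minimal usage sketch (illustrative):
#     processor = CLIPImageProcessor()
#     batch = processor(images=pil_image, return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224) with the defaults above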
| 590 |
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # insert in descending order so the list reads front-to-back ascending
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
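# Illustrative result: merging the two sorted lists above prints
# -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10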
| 590 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
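# Minimal usage sketch of the lazy-import pattern above (a sketch, assuming this
# file sits at transformers/models/xmod/__init__.py as in the upstream library):
#
#     from transformers import XmodConfig, XmodModel
#     config = XmodConfig()      # the submodule is only imported on first access
#     model = XmodModel(config)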
| 312 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase_ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = KandinskyVaaPipeline
UpperCAmelCase_ : List[str] = [
"""image_embeds""",
"""negative_image_embeds""",
]
UpperCAmelCase_ : Optional[Any] = ["""image_embeds""", """negative_image_embeds"""]
UpperCAmelCase_ : Union[str, Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCAmelCase_ : Tuple = False
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
return 100
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
torch.manual_seed(0 )
lowerCAmelCase = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowerCAmelCase = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
torch.manual_seed(0 )
lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
lowerCAmelCase = self.dummy_unet
lowerCAmelCase = self.dummy_movq
lowerCAmelCase = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , prediction_type='''epsilon''' , thresholding=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) ->Union[str, Any]:
lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__SCREAMING_SNAKE_CASE )
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = '''cpu'''
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = output.images
lowerCAmelCase = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = np.array(
[0.6_2_3_7_9_7_6, 1.0, 0.3_6_4_4_1_3_3_2, 1.0, 0.7_0_6_3_9_6_3_4, 0.2_9_8_7_7_1_8_6, 0.8_5_6_5_2_1_2_5, 0.5_2_1_6_8_4_3, 0.5_4_4_5_4_0_4_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = KandinskyVaaPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
lowerCAmelCase = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = '''red cat, 4k photo'''
lowerCAmelCase = torch.Generator(device='''cuda''' ).manual_seed(0 )
lowerCAmelCase , lowerCAmelCase = pipe_prior(
__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
lowerCAmelCase = torch.Generator(device='''cuda''' ).manual_seed(0 )
lowerCAmelCase = pipeline(
image_embeds=__SCREAMING_SNAKE_CASE , negative_image_embeds=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=100 , output_type='''np''' , )
lowerCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 312 | 1 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowerCamelCase_ ( lowerCamelCase ):
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCAmelCase , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__lowerCAmelCase , '''num_attention_heads''' ) )
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=6_4 , __lowerCAmelCase=3 , __lowerCAmelCase=3 , __lowerCAmelCase=2 , __lowerCAmelCase=1 , __lowerCAmelCase=1_6 , __lowerCAmelCase=[1_2_8, 2_5_6, 3_8_4] , __lowerCAmelCase=[4, 6, 8] , __lowerCAmelCase=[2, 3, 4] , __lowerCAmelCase=[1_6, 1_6, 1_6] , __lowerCAmelCase=0 , __lowerCAmelCase=[2, 2, 2] , __lowerCAmelCase=[2, 2, 2] , __lowerCAmelCase=0.02 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=2 , ):
"""simple docstring"""
__magic_name__ :str = parent
__magic_name__ :Optional[Any] = batch_size
__magic_name__ :Union[str, Any] = image_size
__magic_name__ :int = num_channels
__magic_name__ :int = kernel_size
__magic_name__ :List[str] = stride
__magic_name__ :List[str] = padding
__magic_name__ :List[str] = hidden_sizes
__magic_name__ :List[Any] = num_attention_heads
__magic_name__ :Tuple = depths
__magic_name__ :List[str] = key_dim
__magic_name__ :Optional[Any] = drop_path_rate
__magic_name__ :Dict = patch_size
__magic_name__ :Union[str, Any] = attention_ratio
__magic_name__ :Union[str, Any] = mlp_ratio
__magic_name__ :Tuple = initializer_range
__magic_name__ :Optional[int] = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
__magic_name__ :Optional[Any] = is_training
__magic_name__ :Dict = use_labels
__magic_name__ :Union[str, Any] = num_labels
__magic_name__ :Union[str, Any] = initializer_range
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ :int = None
if self.use_labels:
__magic_name__ :int = ids_tensor([self.batch_size] , self.num_labels )
__magic_name__ :Dict = self.get_config()
return config, pixel_values, labels
def A ( self ):
"""simple docstring"""
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[Any] = LevitModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase )
__magic_name__ :List[Any] = (self.image_size, self.image_size)
__magic_name__ , __magic_name__ :Union[str, Any] = image_size[0], image_size[1]
for _ in range(4 ):
__magic_name__ :Optional[int] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
__magic_name__ :Dict = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
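    # Worked example of the reduction above (tester defaults: image_size=64,
    # kernel_size=3, stride=2, padding=1): each pass maps s -> floor((s + 2 - 3) / 2 + 1),
    # so 64 -> 32 -> 16 -> 8 -> 4 after the four passes.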
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Tuple = self.num_labels
__magic_name__ :List[Any] = LevitForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :List[Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self ):
"""simple docstring"""
__magic_name__ :str = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ :Optional[Any] = config_and_inputs
__magic_name__ :Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
a__ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
a__ = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = LevitModelTester(self )
__magic_name__ :Any = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 )
def A ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self ):
"""simple docstring"""
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def A ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def A ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ :Any = model_class(__lowerCAmelCase )
__magic_name__ :int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ :List[str] = [*signature.parameters.keys()]
__magic_name__ :int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
def check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
__magic_name__ :Optional[int] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
__magic_name__ :Dict = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
__magic_name__ :str = outputs.hidden_states
__magic_name__ :Optional[Any] = len(self.model_tester.depths ) + 1
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
__magic_name__ :str = (self.model_tester.image_size, self.model_tester.image_size)
__magic_name__ , __magic_name__ :Tuple = image_size[0], image_size[1]
for _ in range(4 ):
__magic_name__ :str = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
__magic_name__ :Optional[Any] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
__magic_name__ , __magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ :int = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ :List[Any] = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A ( self ):
"""simple docstring"""
pass
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ):
"""simple docstring"""
__magic_name__ :Dict = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
__magic_name__ , __magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ :List[Any] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowerCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
__magic_name__ :str = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
__magic_name__ :Optional[int] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
__magic_name__ :Dict = model(**__lowerCAmelCase ).loss
loss.backward()
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__magic_name__ :Optional[Any] = False
__magic_name__ :Dict = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
__magic_name__ :List[str] = model_class(__lowerCAmelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCAmelCase )
model.train()
__magic_name__ :str = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
__magic_name__ :int = model(**__lowerCAmelCase ).loss
loss.backward()
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ :Any = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
__magic_name__ :Tuple = problem_type['''title''']
__magic_name__ :int = problem_type['''num_labels''']
__magic_name__ :Union[str, Any] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
__magic_name__ :str = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if problem_type["num_labels"] > 1:
__magic_name__ :List[str] = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
__magic_name__ :Dict = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCAmelCase ) as warning_list:
__magic_name__ :str = model(**__lowerCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def A ( self ):
"""simple docstring"""
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ :str = LevitModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def A ( self ):
"""simple docstring"""
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :int = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__lowerCAmelCase )
__magic_name__ :Dict = self.default_image_processor
__magic_name__ :int = prepare_img()
__magic_name__ :Union[str, Any] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
__magic_name__ :Optional[Any] = model(**__lowerCAmelCase )
# verify the logits
__magic_name__ :Optional[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
__magic_name__ :str = torch.tensor([1.0448, -0.3745, -1.8317] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 180 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCamelCase_ :
a__ = 42
# setable values
a__ = 42
a__ = 42
a__ = None
@classmethod
def A ( cls , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
return cls(common=__lowerCAmelCase , init_noise_sigma=__lowerCAmelCase , timesteps=__lowerCAmelCase )
@dataclass
class lowerCamelCase_ ( lowerCamelCase ):
a__ = 42
class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase ):
a__ = [e.name for e in FlaxKarrasDiffusionSchedulers]
a__ = 42
@property
def A ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , __lowerCAmelCase = 1_0_0_0 , __lowerCAmelCase = 0.0001 , __lowerCAmelCase = 0.02 , __lowerCAmelCase = "linear" , __lowerCAmelCase = None , __lowerCAmelCase = "fixed_small" , __lowerCAmelCase = True , __lowerCAmelCase = "epsilon" , __lowerCAmelCase = jnp.floataa , ):
"""simple docstring"""
__magic_name__ :Optional[int] = dtype
def A ( self , __lowerCAmelCase = None ):
"""simple docstring"""
if common is None:
__magic_name__ :Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__magic_name__ :Optional[Any] = jnp.array(1.0 , dtype=self.dtype )
__magic_name__ :str = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__lowerCAmelCase , init_noise_sigma=__lowerCAmelCase , timesteps=__lowerCAmelCase , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
return sample
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = () ):
"""simple docstring"""
__magic_name__ :int = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__magic_name__ :List[Any] = (jnp.arange(0 , __lowerCAmelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__lowerCAmelCase , timesteps=__lowerCAmelCase , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None ):
"""simple docstring"""
__magic_name__ :Optional[Any] = state.common.alphas_cumprod[t]
__magic_name__ :Optional[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__magic_name__ :Tuple = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__magic_name__ :Optional[Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__magic_name__ :Optional[Any] = jnp.clip(__lowerCAmelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__magic_name__ :Dict = jnp.log(jnp.clip(__lowerCAmelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
__magic_name__ :Tuple = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__magic_name__ :Optional[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__magic_name__ :Union[str, Any] = variance
__magic_name__ :List[str] = state.common.betas[t]
__magic_name__ :Any = (predicted_variance + 1) / 2
__magic_name__ :str = frac * max_log + (1 - frac) * min_log
return variance
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = True , ):
"""simple docstring"""
__magic_name__ :List[str] = timestep
if key is None:
__magic_name__ :Union[str, Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__magic_name__ , __magic_name__ :Dict = jnp.split(__lowerCAmelCase , sample.shape[1] , axis=1 )
else:
__magic_name__ :Optional[int] = None
# 1. compute alphas, betas
__magic_name__ :Any = state.common.alphas_cumprod[t]
__magic_name__ :int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__magic_name__ :Optional[int] = 1 - alpha_prod_t
__magic_name__ :Union[str, Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__magic_name__ :List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__magic_name__ :Tuple = model_output
elif self.config.prediction_type == "v_prediction":
__magic_name__ :Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
''' for the FlaxDDPMScheduler.''' )
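        # Restating the branches above in closed form, with a_t = alpha_prod_t:
        #   "epsilon":      x0_hat = (x_t - sqrt(1 - a_t) * eps) / sqrt(a_t)
        #   "v_prediction": x0_hat = sqrt(a_t) * x_t - sqrt(1 - a_t) * v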
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__magic_name__ :Union[str, Any] = jnp.clip(__lowerCAmelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__magic_name__ :Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__magic_name__ :Union[str, Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__magic_name__ :Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__magic_name__ :Tuple = jax.random.split(__lowerCAmelCase , num=1 )
__magic_name__ :Dict = jax.random.normal(__lowerCAmelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__lowerCAmelCase , __lowerCAmelCase , predicted_variance=__lowerCAmelCase ) ** 0.5) * noise
__magic_name__ :List[str] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__magic_name__ :int = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__lowerCAmelCase , state=__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
return add_noise_common(state.common , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
return get_velocity_common(state.common , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
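# Minimal usage sketch of the scheduler API implemented above, written against
# the public diffusers name (FlaxDDPMScheduler); the variables shown are hypothetical:
#
#     from diffusers import FlaxDDPMScheduler
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=50, shape=sample.shape)
#     out = scheduler.step(state, model_output, t, sample)  # one denoising step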
| 180 | 1 |
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('USER_TOKEN', '')


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch the authenticated user's GitHub profile using a personal access token."""
    headers = {
        'Authorization': f'token {auth_token}',
        'Accept': 'application/vnd.github.v3+json',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
| 249 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _snake_case ( unittest.TestCase ):
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=4 , ):
a :Optional[Any] = parent
a :str = batch_size
a :Tuple = seq_length
a :List[Any] = is_training
a :Optional[int] = use_attention_mask
a :List[str] = use_token_type_ids
a :str = use_labels
a :Optional[Any] = vocab_size
a :Optional[int] = hidden_size
a :Tuple = num_hidden_layers
a :Union[str, Any] = num_attention_heads
a :int = intermediate_size
a :int = hidden_act
a :int = hidden_dropout_prob
a :Union[str, Any] = attention_probs_dropout_prob
a :str = max_position_embeddings
a :Dict = type_vocab_size
a :str = type_sequence_label_size
a :List[str] = initializer_range
a :Optional[Any] = num_choices
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a :Any = None
if self.use_attention_mask:
a :Any = random_attention_mask([self.batch_size, self.seq_length] )
a :Any = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_lowerCamelCase , )
return config, input_ids, attention_mask
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = self.prepare_config_and_inputs()
a , a , a :str = config_and_inputs
a :List[Any] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class _snake_case ( _snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = FlaxDistilBertModelTester(self )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model_class_name in self.all_model_classes:
a :int = model_class_name.from_pretrained('''distilbert-base-uncased''' )
a :List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCamelCase )
@require_flax
class _snake_case ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
a :Optional[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
a :List[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
a :List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
a :Union[str, Any] = (1, 11, 768)
self.assertEqual(output.shape , _lowerCamelCase )
a :int = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1e-4 ) )
| 445 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase : Union[str, Any] = logging.get_logger(__name__)
lowercase : Any = {
'''vocab_file''': '''vocab.json''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
'''merges_file''': '''merges.txt''',
}
lowercase : Dict = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
lowercase : List[str] = '''</w>'''
lowercase : Union[str, Any] = '''@@ '''
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
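# For example (a sketch; "</w>" marks the end of a word):
#
#     get_pairs(("l", "o", "w", "e", "r</w>"))
#     # -> {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r</w>")}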
# Speech2Text2 has no max input length
lowercase : int = {'''facebook/s2t-wav2vec2-large-en-de''': 10_24}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : str = VOCAB_FILES_NAMES
A : Tuple = PRETRAINED_VOCAB_FILES_MAP
A : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Any = ['input_ids', 'attention_mask']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> Tuple:
super().__init__(
unk_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
snake_case_ : Tuple = do_lower_case
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as vocab_handle:
snake_case_ : Optional[int] = json.load(_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
snake_case_ : Optional[int] = None
snake_case_ : List[str] = None
else:
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as merges_handle:
snake_case_ : List[Any] = merges_handle.read().split("\n" )[:-1]
snake_case_ : Tuple = [tuple(merge.split()[:2] ) for merge in merges]
snake_case_ : Union[str, Any] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
snake_case_ : Union[str, Any] = {}
@property
def _lowerCAmelCase ( self ) -> int:
return len(self.decoder )
def _lowerCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
snake_case_ : Tuple = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
snake_case_ : List[Any] = get_pairs(_SCREAMING_SNAKE_CASE )
if not pairs:
return token
while True:
snake_case_ : List[Any] = min(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : self.bpe_ranks.get(_SCREAMING_SNAKE_CASE , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case_ : Tuple = bigram
snake_case_ : List[Any] = []
snake_case_ : Optional[Any] = 0
while i < len(_SCREAMING_SNAKE_CASE ):
try:
snake_case_ : Union[str, Any] = word.index(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case_ : List[str] = j
if word[i] == first and i < len(_SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case_ : Optional[Any] = tuple(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = new_word
if len(_SCREAMING_SNAKE_CASE ) == 1:
break
else:
snake_case_ : str = get_pairs(_SCREAMING_SNAKE_CASE )
snake_case_ : int = " ".join(_SCREAMING_SNAKE_CASE )
if word == "\n " + BPE_TOKEN_MERGES:
snake_case_ : Dict = "\n" + BPE_TOKEN_MERGES
if word.endswith(_SCREAMING_SNAKE_CASE ):
snake_case_ : Optional[int] = word.replace(_SCREAMING_SNAKE_CASE , "" )
snake_case_ : Optional[int] = word.replace(" " , _SCREAMING_SNAKE_CASE )
snake_case_ : Dict = word
return word
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding." )
if self.do_lower_case:
snake_case_ : List[Any] = text.lower()
snake_case_ : Any = text.split()
snake_case_ : Any = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_SCREAMING_SNAKE_CASE ).split(" " ) ) )
return split_tokens
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> int:
return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> str:
snake_case_ : Union[str, Any] = self.decoder.get(_SCREAMING_SNAKE_CASE , self.unk_token )
return result
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> str:
snake_case_ : List[str] = " ".join(_SCREAMING_SNAKE_CASE )
# make sure @@ tokens are concatenated
snake_case_ : Tuple = "".join(string.split(_SCREAMING_SNAKE_CASE ) )
return string
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ : List[str] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ : List[str] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE ) + "\n" )
snake_case_ : int = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _SCREAMING_SNAKE_CASE : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
snake_case_ : List[Any] = token_index
writer.write(" ".join(_SCREAMING_SNAKE_CASE ) + "\n" )
index += 1
return (vocab_file, merges_file)
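# Minimal decoding-only usage sketch, written against the public transformers
# name (Speech2Text2Tokenizer) that this class implements:
#
#     from transformers import Speech2Text2Tokenizer
#     tok = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
#     tok.convert_tokens_to_string(["hal@@", "lo", "wel@@", "t"])  # -> "hallo welt"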
| 721 |
def method_a(boundary: list, steps: float) -> float:
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    # Yield the interior sample points a+h, a+2h, ..., b-h. Comparing against
    # b - h/2 keeps floating-point drift from dropping the last interior point.
    x = a + h
    while x < (b - h / 2.0):
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f'y = {y}')


if __name__ == "__main__":
    main()
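# With f(x) = x**2 on [0, 1] and 10 steps, main() prints y ≈ 0.335; the exact
# integral is 1/3 ≈ 0.3333..., and the trapezoidal error shrinks as O(h**2).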
| 114 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _SCREAMING_SNAKE_CASE ( __a ,__a ,unittest.TestCase ):
__SCREAMING_SNAKE_CASE :Optional[Any] = IFInpaintingSuperResolutionPipeline
__SCREAMING_SNAKE_CASE :str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__SCREAMING_SNAKE_CASE :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
__SCREAMING_SNAKE_CASE :Dict = PipelineTesterMixin.required_optional_params - {"""latents"""}
def snake_case__ ( self : Any ):
return self._get_superresolution_dummy_components()
def snake_case__ ( self : Optional[Any] , a__ : int , a__ : List[Any]=0 ):
if str(a__ ).startswith('''mps''' ):
__magic_name__ = torch.manual_seed(a__ )
else:
__magic_name__ = torch.Generator(device=a__ ).manual_seed(a__ )
__magic_name__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(a__ ) ).to(a__ )
__magic_name__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
__magic_name__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
__magic_name__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def snake_case__ ( self : Dict ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def snake_case__ ( self : List[Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def snake_case__ ( self : Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def snake_case__ ( self : Optional[int] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def snake_case__ ( self : Union[str, Any] ):
self._test_save_load_local()
def snake_case__ ( self : Optional[int] ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 432 |
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price with the given tax rate applied."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
print(F'''{price_plus_tax(100, 0.25) = }''')
print(F'''{price_plus_tax(125.50, 0.05) = }''')
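# Expected output of the demo above (up to floating-point rounding):
#   price_plus_tax(100, 0.25) = 125.0
#   price_plus_tax(125.50, 0.05) = 131.775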
| 432 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Union[str, Any] = KandinskyInpaintPipeline
__snake_case : Dict = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
__snake_case : Optional[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
__snake_case : int = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__snake_case : Optional[int] = False
@property
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
return 32
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return 32
@property
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase ( self: str ):
'''simple docstring'''
return 100
@property
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def UpperCamelCase ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
_SCREAMING_SNAKE_CASE = MultilingualCLIP(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = text_encoder.eval()
return text_encoder
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.dummy_text_encoder
_SCREAMING_SNAKE_CASE = self.dummy_tokenizer
_SCREAMING_SNAKE_CASE = self.dummy_unet
_SCREAMING_SNAKE_CASE = self.dummy_movq
_SCREAMING_SNAKE_CASE = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCamelCase ( self: int , UpperCAmelCase_: Any , UpperCAmelCase_: str=0 ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCAmelCase_ )
# create init_image
_SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(UpperCAmelCase_ ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
_SCREAMING_SNAKE_CASE = np.ones((64, 64) , dtype=np.floataa )
_SCREAMING_SNAKE_CASE = 0
if str(UpperCAmelCase_ ).startswith("""mps""" ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """cpu"""
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = self.pipeline_class(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = output.images
_SCREAMING_SNAKE_CASE = pipe(
**self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0]
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def UpperCamelCase ( self: str ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
_SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_SCREAMING_SNAKE_CASE = np.ones((768, 768) , dtype=np.floataa )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = """a hat"""
_SCREAMING_SNAKE_CASE = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
_SCREAMING_SNAKE_CASE = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = pipe_prior(
UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_SCREAMING_SNAKE_CASE = pipeline(
UpperCAmelCase_ , image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
_SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
| 569 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) -> List[str]:
"""simple docstring"""
with open(snake_case__ ) as metadata_file:
_SCREAMING_SNAKE_CASE = json.load(snake_case__ )
_SCREAMING_SNAKE_CASE = LukeConfig(use_entity_aware_attention=snake_case__ ,**metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
_SCREAMING_SNAKE_CASE = torch.load(snake_case__ ,map_location="""cpu""" )
# Load the entity vocab file
_SCREAMING_SNAKE_CASE = load_entity_vocab(snake_case__ )
_SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
_SCREAMING_SNAKE_CASE = AddedToken("""<ent>""" ,lstrip=snake_case__ ,rstrip=snake_case__ )
_SCREAMING_SNAKE_CASE = AddedToken("""<ent2>""" ,lstrip=snake_case__ ,rstrip=snake_case__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(snake_case__ )
with open(os.path.join(snake_case__ ,LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) ,"""w""" ) as f:
json.dump(snake_case__ ,snake_case__ )
_SCREAMING_SNAKE_CASE = LukeTokenizer.from_pretrained(snake_case__ )
# Initialize the embeddings of the special tokens
_SCREAMING_SNAKE_CASE = state_dict["""embeddings.word_embeddings.weight"""]
_SCREAMING_SNAKE_CASE = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
_SCREAMING_SNAKE_CASE = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
_SCREAMING_SNAKE_CASE = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_SCREAMING_SNAKE_CASE = F'encoder.layer.{layer_index}.attention.self.'
_SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
_SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
_SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_SCREAMING_SNAKE_CASE = state_dict["""entity_embeddings.entity_embeddings.weight"""]
_SCREAMING_SNAKE_CASE = entity_emb[entity_vocab["""[MASK]"""]]
_SCREAMING_SNAKE_CASE = LukeModel(config=snake_case__ ).eval()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model.load_state_dict(snake_case__ ,strict=snake_case__ )
if not (len(snake_case__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'Missing keys {", ".join(snake_case__ )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
_SCREAMING_SNAKE_CASE = LukeTokenizer.from_pretrained(snake_case__ ,task="""entity_classification""" )
_SCREAMING_SNAKE_CASE = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
_SCREAMING_SNAKE_CASE = (39, 42)
_SCREAMING_SNAKE_CASE = tokenizer(snake_case__ ,entity_spans=[span] ,add_prefix_space=snake_case__ ,return_tensors="""pt""" )
_SCREAMING_SNAKE_CASE = model(**snake_case__ )
# Verify word hidden states
if model_size == "large":
_SCREAMING_SNAKE_CASE = torch.Size((1, 42, 10_24) )
_SCREAMING_SNAKE_CASE = torch.tensor(
[[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] )
else: # base
_SCREAMING_SNAKE_CASE = torch.Size((1, 42, 7_68) )
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,snake_case__ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_SCREAMING_SNAKE_CASE = torch.Size((1, 1, 10_24) )
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] )
else: # base
_SCREAMING_SNAKE_CASE = torch.Size((1, 1, 7_68) )
_SCREAMING_SNAKE_CASE = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,snake_case__ ,atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(snake_case__ ) )
model.save_pretrained(snake_case__ )
def __lowerCamelCase ( snake_case__ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {}
with open(snake_case__ ,"""r""" ,encoding="""utf-8""" ) as f:
for index, line in enumerate(snake_case__ ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line.rstrip().split("""\t""" )
_SCREAMING_SNAKE_CASE = index
return entity_vocab
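# Illustrative sketch (hypothetical file contents) of the entity_vocab.tsv layout
# the loader above expects: one "<entity name>\t<count>" pair per line, with the
# 0-based line index becoming the entity id. For example:
#   [PAD]\t0
#   [UNK]\t0
#   [MASK]\t0
# Only the part before the tab is kept; the second field is discarded.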
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
UpperCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 569 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class UpperCAmelCase__( yaml.SafeLoader ):
'''simple docstring'''
def UpperCAmelCase ( self : str , lowerCAmelCase : Any) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [self.constructed_objects[key_node] for key_node, _ in node.value]
lowercase__ = [tuple(__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else key for key in keys]
lowercase__ = Counter(__UpperCAmelCase)
lowercase__ = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''')
def UpperCAmelCase ( self : int , lowerCAmelCase : Any , lowerCAmelCase : Tuple=False) -> Optional[int]:
"""simple docstring"""
lowercase__ = super().construct_mapping(__UpperCAmelCase , deep=__UpperCAmelCase)
self._check_no_duplicates_on_constructed_node(__UpperCAmelCase)
return mapping
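# Minimal usage sketch of the duplicate-key guard (illustrative, not part of the
# original file):
#   yaml.load("a: 1\nb: 2", Loader=_NoDuplicateSafeLoader)  # -> {"a": 1, "b": 2}
#   yaml.load("a: 1\na: 2", Loader=_NoDuplicateSafeLoader)  # raises TypeError
# Plain yaml.SafeLoader would silently keep the last value for a repeated key.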
def _lowerCAmelCase ( A__ ):
lowercase__ = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
lowercase__ = full_content[1:].index('---' ) + 1
lowercase__ = '\n'.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(__A )
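# Usage sketch (illustrative): the front matter must open on the very first line.
#   readme = "---\nlicense: mit\n---\n# My dataset"
#   yaml_block, body = _split_yaml_from_readme(readme)
#   # yaml_block == "license: mit", body == "# My dataset"
# Without a leading "---" block, None is returned for the YAML part and the full
# text is returned as the body.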
class UpperCAmelCase__( UpperCAmelCase_ ):
'''simple docstring'''
A : str = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def UpperCAmelCase ( cls : int , lowerCAmelCase : Path) -> "DatasetMetadata":
"""simple docstring"""
with open(__UpperCAmelCase , encoding='utf-8') as readme_file:
lowercase__, lowercase__ = _split_yaml_from_readme(readme_file.read())
if yaml_string is not None:
return cls.from_yaml_string(__UpperCAmelCase)
else:
return cls()
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Path) -> List[str]:
"""simple docstring"""
if path.exists():
with open(__UpperCAmelCase , encoding='utf-8') as readme_file:
lowercase__ = readme_file.read()
else:
lowercase__ = None
lowercase__ = self._to_readme(__UpperCAmelCase)
with open(__UpperCAmelCase , 'w' , encoding='utf-8') as readme_file:
readme_file.write(__UpperCAmelCase)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Optional[str] = None) -> str:
"""simple docstring"""
if readme_content is not None:
lowercase__, lowercase__ = _split_yaml_from_readme(__UpperCAmelCase)
lowercase__ = '---\n' + self.to_yaml_string() + '---\n' + content
else:
lowercase__ = '---\n' + self.to_yaml_string() + '---\n'
return full_content
@classmethod
def UpperCAmelCase ( cls : Optional[int] , lowerCAmelCase : str) -> "DatasetMetadata":
"""simple docstring"""
lowercase__ = yaml.load(__UpperCAmelCase , Loader=_NoDuplicateSafeLoader) or {}
# Convert the YAML keys to DatasetMetadata fields
lowercase__ = {
(key.replace('-' , '_') if key.replace('-' , '_') in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**__UpperCAmelCase)
def UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
return yaml.safe_dump(
{
(key.replace('_' , '-') if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=__UpperCAmelCase , allow_unicode=__UpperCAmelCase , encoding='utf-8' , ).decode('utf-8')
a__ : int = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
a__ : Optional[int] = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
a__ : Union[str, Any] = ap.parse_args()
a__ : str = Path(args.readme_filepath)
a__ : Optional[int] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 622 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 486 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : List[str] = logging.get_logger(__name__)
A__ : Optional[Any] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = """yolos"""
def __init__( self : Any, lowerCamelCase : Tuple=768, lowerCamelCase : Optional[Any]=12, lowerCamelCase : Any=12, lowerCamelCase : Any=3_072, lowerCamelCase : Optional[Any]="gelu", lowerCamelCase : List[Any]=0.0, lowerCamelCase : List[str]=0.0, lowerCamelCase : List[Any]=0.02, lowerCamelCase : str=1E-12, lowerCamelCase : List[Any]=[512, 864], lowerCamelCase : Any=16, lowerCamelCase : List[str]=3, lowerCamelCase : int=True, lowerCamelCase : Optional[int]=100, lowerCamelCase : Optional[int]=True, lowerCamelCase : Optional[int]=False, lowerCamelCase : Optional[Any]=1, lowerCamelCase : List[str]=5, lowerCamelCase : List[Any]=2, lowerCamelCase : List[Any]=5, lowerCamelCase : Any=2, lowerCamelCase : Dict=0.1, **lowerCamelCase : int, ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = qkv_bias
lowercase__ = num_detection_tokens
lowercase__ = use_mid_position_embeddings
lowercase__ = auxiliary_loss
# Hungarian matcher
lowercase__ = class_cost
lowercase__ = bbox_cost
lowercase__ = giou_cost
# Loss coefficients
lowercase__ = bbox_loss_coefficient
lowercase__ = giou_loss_coefficient
lowercase__ = eos_coefficient
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = version.parse("""1.11""" )
@property
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowercase__ ( self : Any ):
'''simple docstring'''
return 1E-4
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return 12
| 671 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
A__ : Dict = 50_00_00
A__ , A__ : str = os.path.split(__file__)
A__ : Optional[Any] = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.map(**lowerCamelCase_ )
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.filter(**lowerCamelCase_ )
def a ( ):
'''simple docstring'''
lowercase__ = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
lowercase__ = generate_example_dataset(
os.path.join(lowerCamelCase_ , '''dataset.arrow''' ) , lowerCamelCase_ , num_examples=lowerCamelCase_ )
lowercase__ = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowerCamelCase_ )
def tokenize(lowerCamelCase_ ):
return tokenizer(examples['''text'''] )
lowercase__ = map(lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''numpy''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''pandas''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = filter(lowerCamelCase_ )
    # Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowerCamelCase_ , '''wb''' ) as f:
f.write(json.dumps(lowerCamelCase_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a :int = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[Any] = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :int = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
__a :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 86 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a_ : Tuple = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
a_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 623 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'spiece.model'}
_snake_case = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
_snake_case = {'bert_for_seq_generation': 512}
class lowerCAmelCase_ ( _lowercase ):
"""simple docstring"""
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = []
UpperCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<::::>" , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> None:
__UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        # Forward the special tokens and SentencePiece kwargs to the base class
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
__UpperCamelCase = vocab_file
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
@property
def __lowercase( self ) -> List[str]:
return self.sp_model.get_piece_size()
def __lowercase( self ) -> int:
__UpperCamelCase = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
__UpperCamelCase = self.__dict__.copy()
__UpperCamelCase = None
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE ) -> int:
__UpperCamelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase = {}
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowercase( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def __lowercase( self , _SCREAMING_SNAKE_CASE ) -> Dict:
return self.sp_model.piece_to_id(_SCREAMING_SNAKE_CASE )
def __lowercase( self , _SCREAMING_SNAKE_CASE ) -> Dict:
__UpperCamelCase = self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE )
return token
def __lowercase( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
__UpperCamelCase = []
__UpperCamelCase = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
__UpperCamelCase = []
else:
current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
return out_string.strip()
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCamelCase = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fi:
__UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 567 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def _a ( ) -> Tuple:
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase = 9, 14 # noqa: F841
__UpperCamelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__UpperCamelCase = defaultdict(__lowercase )
for nodea, nodea, cost in edges:
        adjacency[nodea].append([nodea, cost] )
        adjacency[nodea].append([nodea, cost] )
__UpperCamelCase = mst(__lowercase )
__UpperCamelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
__UpperCamelCase = tuple(answer[:2] )
__UpperCamelCase = tuple(edge[::-1] )
assert edge in result or reverse in result
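# Note: the graph is undirected, so each [u, v, w] edge is registered under both
# endpoints above and Prim's algorithm may report an edge in either orientation;
# the assertion therefore accepts either (u, v) or its reverse (v, u).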
| 567 | 1 |
from __future__ import annotations
_lowerCamelCase : Any = [True] * 1_000_001
_lowerCamelCase : Any = 2
while i * i <= 1_000_000:
    if sieve[i]:
for j in range(i * i, 1_000_001, i):
_lowerCamelCase : Optional[int] = False
i += 1
def __a ( __lowerCAmelCase ) -> bool:
    return sieve[n]
def __a ( __lowerCAmelCase ) -> bool:
return any(digit in '02468' for digit in str(__lowerCAmelCase ) )
def __a ( __lowerCAmelCase = 100_0000 ) -> list[int]:
SCREAMING_SNAKE_CASE : Optional[Any] = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(__lowerCAmelCase ) and not contains_an_even_digit(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE : str = str(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : str = [int(str_num[j:] + str_num[:j] ) for j in range(len(__lowerCAmelCase ) )]
if all(is_prime(__lowerCAmelCase ) for i in list_nums ):
result.append(__lowerCAmelCase )
return result
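# Worked example: for 197 the rotations produced by str_num[j:] + str_num[:j]
# are "197", "971" and "719"; all three are prime, so 197 is kept. Numbers with
# an even digit are filtered out first, since one of their rotations would end
# in that digit and hence be divisible by 2.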
def __a ( ) -> int:
return len(find_circular_primes() )
if __name__ == "__main__":
print(f"""{len(find_circular_primes()) = }""") | 352 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __a ( __lowerCAmelCase ) -> int:
for param in module.parameters():
SCREAMING_SNAKE_CASE : List[Any] = False
def __a ( ) -> List[str]:
SCREAMING_SNAKE_CASE : List[Any] = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
SCREAMING_SNAKE_CASE : List[str] = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def __a ( __lowerCAmelCase ) -> List[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = plt.imshow(__lowerCAmelCase )
fig.axes.get_xaxis().set_visible(__lowerCAmelCase )
fig.axes.get_yaxis().set_visible(__lowerCAmelCase )
plt.show()
def __a ( ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : str = datetime.now()
SCREAMING_SNAKE_CASE : Optional[int] = current_time.strftime('%H:%M:%S' )
return timestamp | 352 | 1 |
"""simple docstring"""
from __future__ import annotations
__lowerCamelCase :int = 1.6_021e-19 # units = C
def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float , ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
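# Worked example (illustrative values): solving for conductivity with
# electron_conc = 1e19 m^-3 and mobility = 0.14 m^2/(V*s) returns
# ("conductivity", 0.14 * 1e19 * 1.6021e-19), i.e. about 0.224 S/m.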
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :str = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =AlbertTokenizer
snake_case__ : Optional[Any] =AlbertTokenizerFast
snake_case__ : Optional[int] =True
snake_case__ : Any =True
snake_case__ : Optional[int] =True
def a__ ( self: Dict )-> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : int = AlbertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]:
lowerCamelCase : List[str] = """this is a test"""
lowerCamelCase : int = """this is a test"""
return input_text, output_text
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : int = """<pad>"""
lowerCamelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(__a ) , 30_000 )
def a__ ( self: List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def a__ ( self: Optional[Any] )-> Union[str, Any]:
if not self.test_rust_tokenizer:
return
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Tuple = self.get_rust_tokenizer()
lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase : List[str] = tokenizer.tokenize(__a )
lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a )
lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Any = self.get_rust_tokenizer()
lowerCamelCase : List[str] = tokenizer.encode(__a )
lowerCamelCase : str = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def a__ ( self: Tuple )-> List[Any]:
lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a )
lowerCamelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] )
lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def a__ ( self: Tuple )-> str:
lowerCamelCase : str = AlbertTokenizer(__a )
lowerCamelCase : Union[str, Any] = tokenizer.encode("""sequence builders""" )
lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" )
lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a )
lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def a__ ( self: Any )-> Dict:
# fmt: off
lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 42 | 0 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase_: Tuple = TypeVar('T')
class lowercase__ (Generic[T] ):
"""simple docstring"""
__UpperCamelCase : deque[T] # Cache store of keys
__UpperCamelCase : set[T] # References of the keys in cache
__UpperCamelCase : int = 1_0 # Maximum capacity of cache
def __init__( self : Optional[int] , __a : int ):
snake_case__ : List[Any] = deque()
snake_case__ : int = set()
if not n:
snake_case__ : List[str] = sys.maxsize
elif n < 0:
raise ValueError("""n should be an integer greater than 0.""" )
else:
snake_case__ : List[str] = n
def lowercase ( self : str , __a : T ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
snake_case__ : Optional[int] = self.dq_store.pop()
self.key_reference.remove(__a )
else:
self.dq_store.remove(__a )
self.dq_store.appendleft(__a )
self.key_reference.add(__a )
def lowercase ( self : Union[str, Any] ):
for k in self.dq_store:
print(__a )
def __repr__( self : int ):
return f'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 648 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowercase_: Tuple = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
snake_case__ : Any = ["""layers""", """blocks"""]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_)
lowercase_: Dict = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
snake_case__ : Tuple = list(s_dict.keys())
for key in keys:
snake_case__ : str = key
for k, v in WHISPER_MAPPING.items():
if k in key:
snake_case__ : Union[str, Any] = new_key.replace(UpperCAmelCase_ , UpperCAmelCase_)
print(F'{key} -> {new_key}')
snake_case__ : Dict = s_dict.pop(UpperCAmelCase_)
return s_dict
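# Renaming sketch: applying WHISPER_MAPPING turns an original checkpoint key such as
#   "decoder.blocks.0.mlp_ln.weight"
# into the Transformers-style key
#   "decoder.layers.0.final_layer_norm.weight"
# ("blocks" -> "layers", then "mlp_ln" -> "final_layer_norm").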
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
snake_case__ , snake_case__ : Any = emb.weight.shape
snake_case__ : List[Any] = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ , bias=UpperCAmelCase_)
snake_case__ : int = emb.weight.data
return lin_layer
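# The helper above implements weight tying: the returned linear layer reuses the
# token-embedding matrix as its projection weight, so the LM head introduces no
# new parameters when tie_embeds is set during conversion below.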
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_):
"""simple docstring"""
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_)
snake_case__ : Dict = os.path.basename(UpperCAmelCase_)
snake_case__ : Tuple = url.split("""/""")[-2]
snake_case__ : Optional[int] = os.path.join(UpperCAmelCase_ , UpperCAmelCase_)
if os.path.exists(UpperCAmelCase_) and not os.path.isfile(UpperCAmelCase_):
raise RuntimeError(F'{download_target} exists and is not a regular file')
if os.path.isfile(UpperCAmelCase_):
snake_case__ : Optional[int] = open(UpperCAmelCase_ , """rb""").read()
if hashlib.shaaaa(UpperCAmelCase_).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file')
with urllib.request.urlopen(UpperCAmelCase_) as source, open(UpperCAmelCase_ , """wb""") as output:
with tqdm(
total=int(source.info().get("""Content-Length""")) , ncols=80 , unit="""iB""" , unit_scale=UpperCAmelCase_ , unit_divisor=1_024) as loop:
while True:
snake_case__ : Union[str, Any] = source.read(8_192)
if not buffer:
break
output.write(UpperCAmelCase_)
loop.update(len(UpperCAmelCase_))
snake_case__ : Optional[int] = open(UpperCAmelCase_ , """rb""").read()
if hashlib.shaaaa(UpperCAmelCase_).hexdigest() != expected_shaaaa:
raise RuntimeError(
"""Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""")
return model_bytes
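# The expected SHA-256 digest is the second-to-last path segment of each _MODELS
# URL; a cached file is reused only if its digest matches, a stale file triggers
# a re-download, and a mismatch after a fresh download raises RuntimeError.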
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_):
"""simple docstring"""
if ".pt" not in checkpoint_path:
snake_case__ : List[Any] = _download(_MODELS[checkpoint_path])
else:
snake_case__ : Union[str, Any] = torch.load(UpperCAmelCase_ , map_location="""cpu""")
snake_case__ : Union[str, Any] = original_checkpoint["""dims"""]
snake_case__ : Optional[int] = original_checkpoint["""model_state_dict"""]
snake_case__ : int = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(UpperCAmelCase_)
rename_keys(UpperCAmelCase_)
snake_case__ : List[Any] = True
snake_case__ : Dict = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
snake_case__ : List[Any] = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=UpperCAmelCase_ , decoder_ffn_dim=UpperCAmelCase_ , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
snake_case__ : int = WhisperForConditionalGeneration(UpperCAmelCase_)
snake_case__ , snake_case__ : Tuple = model.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_)
if len(UpperCAmelCase_) > 0 and not set(UpperCAmelCase_) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
F' but all the following weights are missing {missing}')
if tie_embeds:
snake_case__ : Dict = make_linear_from_emb(model.model.decoder.embed_tokens)
else:
snake_case__ : Optional[int] = proj_out_weights
model.save_pretrained(UpperCAmelCase_)
if __name__ == "__main__":
lowercase_: int = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
lowercase_: int = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 648 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
lowercase : List[str] =(UniPCMultistepScheduler,)
lowercase : Union[str, Any] =(("""num_inference_steps""", 25),)
def UpperCamelCase ( self , **UpperCamelCase_ ):
lowercase_ :Any = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**UpperCamelCase_ )
return config
def UpperCamelCase ( self , UpperCamelCase_=0 , **UpperCamelCase_ ):
lowercase_ :List[str] = dict(self.forward_default_kwargs )
lowercase_ :List[Any] = kwargs.pop('''num_inference_steps''' , UpperCamelCase_ )
lowercase_ :Optional[Any] = self.dummy_sample
lowercase_ :List[str] = 0.1 * sample
lowercase_ :int = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase_ :str = self.get_scheduler_config(**UpperCamelCase_ )
lowercase_ :Tuple = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residuals
lowercase_ :Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase_ )
lowercase_ :Optional[int] = scheduler_class.from_pretrained(UpperCamelCase_ )
new_scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residuals
lowercase_ :int = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase_ , lowercase_ :List[Any] = sample, sample
for t in range(UpperCamelCase_ , time_step + scheduler.config.solver_order + 1 ):
lowercase_ :List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
lowercase_ :Dict = new_scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self , UpperCamelCase_=0 , **UpperCamelCase_ ):
lowercase_ :Tuple = dict(self.forward_default_kwargs )
lowercase_ :List[Any] = kwargs.pop('''num_inference_steps''' , UpperCamelCase_ )
lowercase_ :List[Any] = self.dummy_sample
lowercase_ :Union[str, Any] = 0.1 * sample
lowercase_ :Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase_ :Dict = self.get_scheduler_config()
lowercase_ :Optional[Any] = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
lowercase_ :Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase_ )
lowercase_ :Tuple = scheduler_class.from_pretrained(UpperCamelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residual (must be after setting timesteps)
lowercase_ :List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase_ :List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
lowercase_ :Dict = new_scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self , UpperCamelCase_=None , **UpperCamelCase_ ):
if scheduler is None:
lowercase_ :Union[str, Any] = self.scheduler_classes[0]
lowercase_ :int = self.get_scheduler_config(**UpperCamelCase_ )
lowercase_ :Tuple = scheduler_class(**UpperCamelCase_ )
lowercase_ :int = self.scheduler_classes[0]
lowercase_ :int = self.get_scheduler_config(**UpperCamelCase_ )
lowercase_ :int = scheduler_class(**UpperCamelCase_ )
lowercase_ :List[Any] = 10
lowercase_ :Union[str, Any] = self.dummy_model()
lowercase_ :Optional[int] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
lowercase_ :List[Any] = model(UpperCamelCase_ , UpperCamelCase_ )
lowercase_ :Union[str, Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
return sample
def UpperCamelCase ( self ):
lowercase_ :Dict = dict(self.forward_default_kwargs )
lowercase_ :Optional[Any] = kwargs.pop('''num_inference_steps''' , UpperCamelCase_ )
for scheduler_class in self.scheduler_classes:
lowercase_ :Union[str, Any] = self.get_scheduler_config()
lowercase_ :Tuple = scheduler_class(**UpperCamelCase_ )
lowercase_ :Tuple = self.dummy_sample
lowercase_ :List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase_ , '''set_timesteps''' ):
scheduler.set_timesteps(UpperCamelCase_ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase_ , '''set_timesteps''' ):
lowercase_ :Any = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase_ :str = [residual + 0.2, residual + 0.15, residual + 0.10]
lowercase_ :str = dummy_past_residuals[: scheduler.config.solver_order]
lowercase_ :int = scheduler.timesteps[5]
lowercase_ :Tuple = scheduler.timesteps[6]
lowercase_ :Dict = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
lowercase_ :Tuple = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowercase_ :str = UniPCMultistepScheduler(**self.get_scheduler_config() )
lowercase_ :Optional[int] = self.full_loop(scheduler=UpperCamelCase_ )
lowercase_ :int = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
lowercase_ :Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowercase_ :Dict = DEISMultistepScheduler.from_config(scheduler.config )
lowercase_ :Dict = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowercase_ :Any = UniPCMultistepScheduler.from_config(scheduler.config )
lowercase_ :str = self.full_loop(scheduler=UpperCamelCase_ )
lowercase_ :Optional[Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def UpperCamelCase ( self ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def UpperCamelCase ( self ):
self.check_over_configs(thresholding=UpperCamelCase_ )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , solver_order=UpperCamelCase_ , solver_type=UpperCamelCase_ , )
def UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def UpperCamelCase ( self ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCamelCase_ , solver_type=UpperCamelCase_ , prediction_type=UpperCamelCase_ , )
lowercase_ :Tuple = self.full_loop(
solver_order=UpperCamelCase_ , solver_type=UpperCamelCase_ , prediction_type=UpperCamelCase_ , )
assert not torch.isnan(UpperCamelCase_ ).any(), "Samples have nan numbers"
def UpperCamelCase ( self ):
self.check_over_configs(lower_order_final=UpperCamelCase_ )
self.check_over_configs(lower_order_final=UpperCamelCase_ )
def UpperCamelCase ( self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=UpperCamelCase_ , time_step=0 )
def UpperCamelCase ( self ):
lowercase_ :str = self.full_loop()
lowercase_ :Dict = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def UpperCamelCase ( self ):
lowercase_ :Tuple = self.full_loop(prediction_type='''v_prediction''' )
lowercase_ :str = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_mean.item() - 0.1014 ) < 1E-3
def UpperCamelCase ( self ):
lowercase_ :Union[str, Any] = self.scheduler_classes[0]
lowercase_ :Optional[int] = self.get_scheduler_config(thresholding=UpperCamelCase_ , dynamic_thresholding_ratio=0 )
lowercase_ :int = scheduler_class(**UpperCamelCase_ )
lowercase_ :int = 10
lowercase_ :str = self.dummy_model()
lowercase_ :List[str] = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
lowercase_ :int = model(UpperCamelCase_ , UpperCamelCase_ )
lowercase_ :Dict = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
assert sample.dtype == torch.floataa
def UpperCamelCase ( self , **UpperCamelCase_ ):
for scheduler_class in self.scheduler_classes:
lowercase_ :List[str] = self.get_scheduler_config(**UpperCamelCase_ )
lowercase_ :Any = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 441 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCamelCase ( _a , _a=0.999 , _a="cosine" , ) -> Dict:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_a ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_a ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
lowercase_ :Any = []
for i in range(_a ):
lowercase_ :List[str] = i / num_diffusion_timesteps
lowercase_ :str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_a ) / alpha_bar_fn(_a ) , _a ) )
return torch.tensor(_a , dtype=torch.floataa )
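# Each beta_i above follows the squaredcos_cap_v2 construction:
#   beta_i = min(1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), max_beta)
# where, for the default "cosine" transform,
#   alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2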
class UpperCamelCase ( lowercase__ , lowercase__ ):
'''simple docstring'''
lowercase : Tuple =[e.name for e in KarrasDiffusionSchedulers]
lowercase : Tuple =2
@register_to_config
def __init__( self , UpperCamelCase_ = 1000 , UpperCamelCase_ = 0.0_0085 , UpperCamelCase_ = 0.012 , UpperCamelCase_ = "linear" , UpperCamelCase_ = None , UpperCamelCase_ = "epsilon" , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = 1.0 , UpperCamelCase_ = "linspace" , UpperCamelCase_ = 0 , ):
if trained_betas is not None:
lowercase_ :int = torch.tensor(UpperCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "linear":
lowercase_ :List[str] = torch.linspace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase_ :int = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase_ :Optional[int] = betas_for_alpha_bar(UpperCamelCase_ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
lowercase_ :Dict = betas_for_alpha_bar(UpperCamelCase_ , alpha_transform_type='''exp''' )
else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}" )
lowercase_ :str = 1.0 - self.betas
lowercase_ :Optional[int] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowercase_ :str = use_karras_sigmas
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=None ):
if schedule_timesteps is None:
lowercase_ :List[str] = self.timesteps
lowercase_ :int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowercase_ :Dict = 1 if len(UpperCamelCase_ ) > 1 else 0
else:
lowercase_ :Any = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
lowercase_ :Union[str, Any] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , ):
lowercase_ :List[str] = self.index_for_timestep(UpperCamelCase_ )
lowercase_ :Optional[Any] = self.sigmas[step_index]
lowercase_ :Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , ):
lowercase_ :Optional[Any] = num_inference_steps
lowercase_ :Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowercase_ :Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase_ , dtype=UpperCamelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowercase_ :Any = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase_ :List[Any] = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowercase_ :Dict = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase_ :Dict = (np.arange(UpperCamelCase_ , 0 , -step_ratio )).round().copy().astype(UpperCamelCase_ )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
lowercase_ :Optional[int] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowercase_ :Tuple = np.log(UpperCamelCase_ )
lowercase_ :Dict = np.interp(UpperCamelCase_ , np.arange(0 , len(UpperCamelCase_ ) ) , UpperCamelCase_ )
if self.config.use_karras_sigmas:
lowercase_ :int = self._convert_to_karras(in_sigmas=UpperCamelCase_ , num_inference_steps=self.num_inference_steps )
lowercase_ :Optional[int] = np.array([self._sigma_to_t(UpperCamelCase_ , UpperCamelCase_ ) for sigma in sigmas] )
lowercase_ :Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowercase_ :Optional[int] = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ )
lowercase_ :Tuple = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
lowercase_ :Any = torch.from_numpy(UpperCamelCase_ )
lowercase_ :List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(UpperCamelCase_ ).startswith('''mps''' ):
# mps does not support float64
lowercase_ :int = timesteps.to(UpperCamelCase_ , dtype=torch.floataa )
else:
lowercase_ :Optional[Any] = timesteps.to(device=UpperCamelCase_ )
# empty dt and derivative
lowercase_ :List[str] = None
lowercase_ :List[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowercase_ :int = defaultdict(UpperCamelCase_ )
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ):
# get log sigma
lowercase_ :Union[str, Any] = np.log(UpperCamelCase_ )
# get distribution
lowercase_ :Optional[Any] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
lowercase_ :List[Any] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
lowercase_ :str = low_idx + 1
lowercase_ :Any = log_sigmas[low_idx]
lowercase_ :int = log_sigmas[high_idx]
# interpolate sigmas
lowercase_ :Dict = (low - log_sigma) / (low - high)
lowercase_ :str = np.clip(UpperCamelCase_ , 0 , 1 )
# transform interpolation to time range
lowercase_ :Dict = (1 - w) * low_idx + w * high_idx
lowercase_ :int = t.reshape(sigma.shape )
return t
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :float = in_sigmas[-1].item()
lowercase_ :float = in_sigmas[0].item()
lowercase_ :int = 7.0 # 7.0 is the value used in the paper
lowercase_ :Optional[Any] = np.linspace(0 , 1 , UpperCamelCase_ )
lowercase_ :List[str] = sigma_min ** (1 / rho)
lowercase_ :List[Any] = sigma_max ** (1 / rho)
lowercase_ :Tuple = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def UpperCamelCase ( self ):
return self.dt is None
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = True , ):
lowercase_ :Any = self.index_for_timestep(UpperCamelCase_ )
# advance index counter by 1
lowercase_ :Any = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowercase_ :Optional[int] = self.sigmas[step_index]
lowercase_ :List[Any] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
lowercase_ :Optional[int] = self.sigmas[step_index - 1]
lowercase_ :Dict = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowercase_ :List[Any] = 0
lowercase_ :List[str] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowercase_ :Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_next
lowercase_ :List[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowercase_ :Dict = sigma_hat if self.state_in_first_order else sigma_next
lowercase_ :Optional[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
lowercase_ :List[str] = model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.config.clip_sample:
lowercase_ :str = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowercase_ :Optional[int] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowercase_ :Optional[int] = sigma_next - sigma_hat
# store for 2nd order step
lowercase_ :str = derivative
lowercase_ :Union[str, Any] = dt
lowercase_ :Optional[int] = sample
else:
# 2. 2nd order / Heun's method
lowercase_ :str = (sample - pred_original_sample) / sigma_next
lowercase_ :List[str] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
lowercase_ :Union[str, Any] = self.dt
lowercase_ :Any = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
lowercase_ :List[Any] = None
lowercase_ :List[str] = None
lowercase_ :Dict = None
lowercase_ :int = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowercase_ :List[str] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase_ ):
# mps does not support float64
lowercase_ :Optional[Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
lowercase_ :Tuple = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
lowercase_ :Union[str, Any] = self.timesteps.to(original_samples.device )
lowercase_ :int = timesteps.to(original_samples.device )
lowercase_ :int = [self.index_for_timestep(UpperCamelCase_ , UpperCamelCase_ ) for t in timesteps]
lowercase_ :Tuple = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowercase_ :List[str] = sigma.unsqueeze(-1 )
lowercase_ :List[str] = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 441 | 1 |
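The `step` method in the scheduler above is Heun's method: a first-order Euler predictor followed by a corrector that averages the slopes at the two endpoints. A minimal standalone sketch of that two-stage update on a toy ODE; the function, step size, and values here are illustrative assumptions, not part of the scheduler:

def heun_step(f, t, y, dt):
    k1 = f(t, y)                    # slope at the start of the interval
    y_pred = y + dt * k1            # first-order (Euler) predictor
    k2 = f(t + dt, y_pred)          # slope at the predicted endpoint
    return y + dt * (k1 + k2) / 2   # corrector: average the two slopes

# integrate dy/dt = -y from y(0) = 1; exact value at t = 1 is exp(-1) ~= 0.3679
y, t, dt = 1.0, 0.0, 0.1
for _ in range(10):
    y = heun_step(lambda t, y: -y, t, y, dt)
    t += dt
print(y)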
def solution() -> int:
    """simple docstring"""
    constant = []
    i = 1
    # len(constant) counts appended numbers, not digits, so this overshoots the
    # 10^6-digit target a little; harmless for the indexing below, just memory-hungry
    while len(constant ) < 1E6:
        constant.append(str(i ) )
        i += 1
    constant = "".join(constant )
    return (
        int(constant[0] )
        * int(constant[9] )
        * int(constant[9_9] )
        * int(constant[9_9_9] )
        * int(constant[9_9_9_9] )
        * int(constant[9_9_9_9_9] )
        * int(constant[9_9_9_9_9_9] )
    )
if __name__ == "__main__":
print(solution())
| 637 |
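The brute-force join above is the standard Project Euler approach; the same digit of Champernowne's constant can also be located arithmetically, without materialising a megabyte string. A sketch of that idea; the helper name is an illustrative assumption:

def champernowne_digit(n):
    """Return the n-th digit (1-indexed) of 0.123456789101112..."""
    length = 1   # digit-count of the numbers in the current block
    count = 9    # how many numbers share that digit-count
    start = 1    # first number of the block
    while n > length * count:
        n -= length * count
        length += 1
        count *= 10
        start *= 10
    number = start + (n - 1) // length   # the number containing digit n
    return int(str(number)[(n - 1) % length])

assert champernowne_digit(12) == 1       # ...9, 10, 11: position 12 is the first '1' of 11
print(champernowne_digit(1_000_000))     # 1, matching constant[999999] above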
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class _snake_case ( unittest.TestCase ):
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=True , _lowerCamelCase=1 / 255 , _lowerCamelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
UpperCAmelCase__ : List[str] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : List[str] = min_resolution
UpperCAmelCase__ : Optional[Any] = max_resolution
UpperCAmelCase__ : List[str] = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean
UpperCAmelCase__ : Dict = image_std
UpperCAmelCase__ : Any = do_rescale
UpperCAmelCase__ : str = rescale_factor
UpperCAmelCase__ : List[str] = do_pad
def snake_case__ ( self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=False):
if not batched:
UpperCAmelCase__ : List[Any] = image_inputs[0]
if isinstance(_lowerCamelCase , Image.Image):
UpperCAmelCase__ , UpperCAmelCase__ : str = image.size
else:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase__ : List[Any] = int(self.size["""shortest_edge"""] * h / w)
UpperCAmelCase__ : List[str] = self.size["""shortest_edge"""]
elif w > h:
UpperCAmelCase__ : List[str] = self.size["""shortest_edge"""]
UpperCAmelCase__ : Optional[int] = int(self.size["""shortest_edge"""] * w / h)
else:
UpperCAmelCase__ : List[str] = self.size["""shortest_edge"""]
UpperCAmelCase__ : int = self.size["""shortest_edge"""]
else:
UpperCAmelCase__ : str = []
for image in image_inputs:
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
UpperCAmelCase__ : Any = max(_lowerCamelCase , key=lambda _lowerCamelCase: item[0])[0]
UpperCAmelCase__ : List[Any] = max(_lowerCamelCase , key=lambda _lowerCamelCase: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class _snake_case ( a__ , unittest.TestCase ):
lowerCAmelCase :Union[str, Any] = DeformableDetrImageProcessor if is_vision_available() else None
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = DeformableDetrImageProcessingTester(self)
@property
def snake_case__ ( self):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowerCamelCase , """image_mean"""))
self.assertTrue(hasattr(_lowerCamelCase , """image_std"""))
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize"""))
self.assertTrue(hasattr(_lowerCamelCase , """do_resize"""))
self.assertTrue(hasattr(_lowerCamelCase , """do_rescale"""))
self.assertTrue(hasattr(_lowerCamelCase , """do_pad"""))
self.assertTrue(hasattr(_lowerCamelCase , """size"""))
def snake_case__ ( self):
UpperCAmelCase__ : int = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333})
self.assertEqual(image_processor.do_pad , _lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowerCamelCase)
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84})
self.assertEqual(image_processor.do_pad , _lowerCamelCase)
def snake_case__ ( self):
pass
def snake_case__ ( self):
# Initialize image_processing
UpperCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image)
# Test not batched input
UpperCAmelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.image_processor_tester.get_expected_values(_lowerCamelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = image_processing(_lowerCamelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case__ ( self):
# Initialize image_processing
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray)
# Test not batched input
UpperCAmelCase__ : int = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""").pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case__ ( self):
# Initialize image_processing
UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor)
# Test not batched input
UpperCAmelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_lowerCamelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : int = image_processing(_lowerCamelCase , return_tensors="""pt""").pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case__ ( self):
# prepare image and target
UpperCAmelCase__ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""") as f:
UpperCAmelCase__ : Dict = json.loads(f.read())
UpperCAmelCase__ : int = {"""image_id""": 3_9769, """annotations""": target}
# encode them
UpperCAmelCase__ : Dict = DeformableDetrImageProcessor()
UpperCAmelCase__ : int = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , return_tensors="""pt""")
# verify pixel values
UpperCAmelCase__ : Tuple = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["""pixel_values"""].shape , _lowerCamelCase)
UpperCAmelCase__ : Any = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4))
# verify area
UpperCAmelCase__ : List[Any] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowerCamelCase))
# verify boxes
UpperCAmelCase__ : Union[str, Any] = torch.Size([6, 4])
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowerCamelCase)
UpperCAmelCase__ : Dict = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowerCamelCase , atol=1e-3))
# verify image_id
UpperCAmelCase__ : Optional[int] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowerCamelCase))
# verify is_crowd
UpperCAmelCase__ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowerCamelCase))
# verify class_labels
UpperCAmelCase__ : Any = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowerCamelCase))
# verify orig_size
UpperCAmelCase__ : int = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowerCamelCase))
# verify size
UpperCAmelCase__ : List[Any] = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowerCamelCase))
@slow
def snake_case__ ( self):
# prepare image, target and masks_path
UpperCAmelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""") as f:
UpperCAmelCase__ : Optional[int] = json.loads(f.read())
UpperCAmelCase__ : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
UpperCAmelCase__ : Tuple = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""")
# encode them
UpperCAmelCase__ : List[str] = DeformableDetrImageProcessor(format="""coco_panoptic""")
UpperCAmelCase__ : Tuple = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , masks_path=_lowerCamelCase , return_tensors="""pt""")
# verify pixel values
UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["""pixel_values"""].shape , _lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4))
# verify area
UpperCAmelCase__ : str = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowerCamelCase))
# verify boxes
UpperCAmelCase__ : List[str] = torch.Size([6, 4])
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowerCamelCase)
UpperCAmelCase__ : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowerCamelCase , atol=1e-3))
# verify image_id
UpperCAmelCase__ : Tuple = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowerCamelCase))
# verify is_crowd
UpperCAmelCase__ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowerCamelCase))
# verify class_labels
UpperCAmelCase__ : List[Any] = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowerCamelCase))
# verify masks
UpperCAmelCase__ : Dict = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , _lowerCamelCase)
# verify orig_size
UpperCAmelCase__ : Any = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowerCamelCase))
# verify size
UpperCAmelCase__ : int = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowerCamelCase)) | 407 | 0 |
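The `get_expected_values` helper in the tester above encodes shortest-edge resizing; a standalone sketch of that arithmetic (the function name is an illustrative assumption):

def shortest_edge_resize(h, w, shortest_edge):
    # scale so the shorter side equals `shortest_edge`, preserving aspect ratio
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

print(shortest_edge_resize(480, 640, 18))  # (18, 24)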
'''simple docstring'''
from functools import reduce
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n :str = N ) -> int:
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 532 |
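The reduce-based solution above recomputes every 13-digit product from scratch. Since any window containing a zero contributes nothing, one common refinement is to scan only the zero-free runs. A sketch under that observation; the function name is illustrative:

def largest_window_product(digits, width=13):
    best = 0
    # a zero annihilates any window containing it, so scan zero-free runs only
    for run in digits.split("0"):
        for i in range(len(run) - width + 1):
            product = 1
            for ch in run[i : i + width]:
                product *= int(ch)
            best = max(best, product)
    return best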
'''simple docstring'''
import datasets
a_ : List[Any] = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
a_ : Optional[Any] = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
a_ : Optional[Any] = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"
def _A (lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Tuple ) -> Dict:
'''simple docstring'''
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute( self , predictions , references ):
        return {"accuracy": simple_accuracy(predictions , references )}
| 532 | 1 |
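The metric above reduces to mean label agreement. A minimal usage sketch with made-up NumPy arrays:

import numpy as np

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
print((preds == labels).mean())  # 0.75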
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = GPTSwaTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ = GPTSwaTokenizer(UpperCAmelCase__ , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''')
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : List[Any]) ->Optional[Any]:
'''simple docstring'''
A__ = '''This is a test'''
A__ = '''This is a test'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
A__ = '''<s>'''
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__) , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict:
'''simple docstring'''
A__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''j''')
self.assertEqual(len(UpperCAmelCase__) , 2_000)
def SCREAMING_SNAKE_CASE ( self : int) ->Dict:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 2_000)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
'''simple docstring'''
A__ = GPTSwaTokenizer(UpperCAmelCase__)
A__ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__) , [465, 287, 265, 631, 842])
A__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
# fmt: off
self.assertListEqual(
UpperCAmelCase__ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
A__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__)
self.assertListEqual(
UpperCAmelCase__ , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
A__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__)
# fmt: off
self.assertListEqual(
UpperCAmelCase__ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''])
# fmt: on
def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
'''simple docstring'''
A__ = GPTSwaTokenizer(UpperCAmelCase__)
A__ = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
A__ = [
[465, 287, 265, 631, 842],
[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assertListEqual(tokenizer.encode_fast(UpperCAmelCase__) , UpperCAmelCase__)
# Test that decode_fast returns the input text
for text, token_ids in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assertEqual(tokenizer.decode_fast(UpperCAmelCase__) , UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Any) ->int:
'''simple docstring'''
A__ = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
A__ = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=UpperCAmelCase__ , )
| 87 |
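The `<0xC3>` and `<0xA9>` tokens expected by the test above come from SentencePiece byte fallback: characters that are out of vocabulary are emitted as their raw UTF-8 bytes. A one-line check of why 'é' maps to exactly those two tokens:

print(["<0x%02X>" % b for b in "é".encode("utf-8")])  # ['<0xC3>', '<0xA9>']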
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = '''.'''
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''')
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = '''\n'''.join(non_existent_paths)
        raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
if all_paths != sorted(all_paths):
raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
| 207 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius( n ):
    """simple docstring"""
    factors = prime_factors(n )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
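A self-contained cross-check of the Möbius function above, factoring by trial division instead of the imported helpers; the names and the printed values are for illustration:

def mobius_check(n):
    if n < 1:
        raise ValueError("n must be a positive integer")
    result, p = 1, 2
    while p * p <= n:
        if n % p == 0:
            n //= p
            if n % p == 0:      # squared prime factor -> mu(n) = 0
                return 0
            result = -result
        p += 1
    if n > 1:                   # one leftover prime factor
        result = -result
    return result

print([mobius_check(n) for n in range(1, 11)])
# [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]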
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial :
    """simple docstring"""
    def __init__( self , degree : int , coefficients : MutableSequence[float] ):
        if len(coefficients ) != degree + 1:
            raise ValueError(
                '''The number of coefficients should be equal to the degree + 1.''' )
        self.coefficients = list(coefficients )
        self.degree = degree
    def __add__( self , polynomial_a ):
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , coefficients )
    def __sub__( self , polynomial_a ):
        return self + polynomial_a * Polynomial(0 , [-1] )
    def __neg__( self ):
        return Polynomial(self.degree , [-c for c in self.coefficients] )
    def __mul__( self , polynomial_a ):
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , coefficients )
    def evaluate( self , substitution ):
        result = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__( self ):
        polynomial = ""
        for i in range(self.degree , -1 , -1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
        return polynomial
    def __repr__( self ):
        return self.__str__()
    def derivative( self ):
        coefficients = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )
    def integral( self , constant = 0 ):
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )
    def __eq__( self , polynomial_a ):
        if not isinstance(polynomial_a , Polynomial ):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1 ):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__( self , polynomial_a ):
        return not self.__eq__(polynomial_a )
| 297 | 0 |
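A short usage sketch of the Polynomial class above, with the results as comments (coefficients are stored lowest degree first):

p = Polynomial(2, [1, 0, 3])   # coefficients [c0, c1, c2] -> 3x^2 + 1
q = Polynomial(1, [0, 2])      # 2x
print(p + q)                   # 3x^2 + 2x + 1
print((p * q).evaluate(2))     # 6x^3 + 2x at x = 2 -> 52
print(p.derivative())          # 6x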
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase (_A , _A , _A ):
"""simple docstring"""
return params[f'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def lowercase (_A , _A , _A , _A="attention" ):
"""simple docstring"""
_lowerCAmelCase : int = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
_lowerCAmelCase : List[Any] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
_lowerCAmelCase : Optional[Any] = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
_lowerCAmelCase : Union[str, Any] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
_lowerCAmelCase : List[Any] = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
_lowerCAmelCase : str = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
_lowerCAmelCase : List[str] = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
_lowerCAmelCase : Tuple = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def lowercase (_A , _A , _A , _A=False ):
"""simple docstring"""
if split_mlp_wi:
_lowerCAmelCase : Optional[int] = params[f'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
_lowerCAmelCase : Optional[Any] = params[f'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
_lowerCAmelCase : Optional[int] = (wi_a, wi_a)
else:
_lowerCAmelCase : Dict = params[f'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
_lowerCAmelCase : Dict = params[f'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def lowercase (_A , _A , _A , _A ):
"""simple docstring"""
return params[f'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def lowercase (_A , *, _A , _A , _A = False ):
"""simple docstring"""
_lowerCAmelCase : List[str] = traverse_util.flatten_dict(variables['target'] )
_lowerCAmelCase : Any = {'/'.join(_A ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_lowerCAmelCase : int = 'encoder/encoder/mlp/wi_0/kernel' in old
print('Split MLP:' , _A )
_lowerCAmelCase : Union[str, Any] = collections.OrderedDict()
# Shared embeddings.
_lowerCAmelCase : Tuple = old['token_embedder/embedding']
# Encoder.
for i in range(_A ):
# Block i, layer 0 (Self Attention).
_lowerCAmelCase : Optional[Any] = tax_layer_norm_lookup(_A , _A , 'encoder' , 'pre_attention_layer_norm' )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = tax_attention_lookup(_A , _A , 'encoder' , 'attention' )
_lowerCAmelCase : str = layer_norm
_lowerCAmelCase : Union[str, Any] = k.T
_lowerCAmelCase : List[str] = o.T
_lowerCAmelCase : str = q.T
_lowerCAmelCase : Union[str, Any] = v.T
# Block i, layer 1 (MLP).
_lowerCAmelCase : Tuple = tax_layer_norm_lookup(_A , _A , 'encoder' , 'pre_mlp_layer_norm' )
_lowerCAmelCase , _lowerCAmelCase : Dict = tax_mlp_lookup(_A , _A , 'encoder' , _A )
_lowerCAmelCase : List[str] = layer_norm
if split_mlp_wi:
_lowerCAmelCase : List[Any] = wi[0].T
_lowerCAmelCase : List[str] = wi[1].T
else:
_lowerCAmelCase : Any = wi.T
_lowerCAmelCase : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_lowerCAmelCase : Dict = tax_relpos_bias_lookup(
_A , _A , 'encoder' ).T
_lowerCAmelCase : int = old['encoder/encoder_norm/scale']
if not scalable_attention:
_lowerCAmelCase : List[str] = tax_relpos_bias_lookup(
_A , 0 , 'encoder' ).T
_lowerCAmelCase : Union[str, Any] = tax_relpos_bias_lookup(
_A , 0 , 'decoder' ).T
if not is_encoder_only:
# Decoder.
for i in range(_A ):
# Block i, layer 0 (Self Attention).
_lowerCAmelCase : Any = tax_layer_norm_lookup(_A , _A , 'decoder' , 'pre_self_attention_layer_norm' )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = tax_attention_lookup(_A , _A , 'decoder' , 'self_attention' )
_lowerCAmelCase : Optional[Any] = layer_norm
_lowerCAmelCase : Dict = k.T
_lowerCAmelCase : str = o.T
_lowerCAmelCase : Optional[int] = q.T
_lowerCAmelCase : str = v.T
# Block i, layer 1 (Cross Attention).
_lowerCAmelCase : Tuple = tax_layer_norm_lookup(_A , _A , 'decoder' , 'pre_cross_attention_layer_norm' )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = tax_attention_lookup(_A , _A , 'decoder' , 'encoder_decoder_attention' )
_lowerCAmelCase : str = layer_norm
_lowerCAmelCase : Optional[Any] = k.T
_lowerCAmelCase : str = o.T
_lowerCAmelCase : Union[str, Any] = q.T
_lowerCAmelCase : int = v.T
# Block i, layer 2 (MLP).
_lowerCAmelCase : Tuple = tax_layer_norm_lookup(_A , _A , 'decoder' , 'pre_mlp_layer_norm' )
_lowerCAmelCase , _lowerCAmelCase : Any = tax_mlp_lookup(_A , _A , 'decoder' , _A )
_lowerCAmelCase : Optional[int] = layer_norm
if split_mlp_wi:
_lowerCAmelCase : Union[str, Any] = wi[0].T
_lowerCAmelCase : Optional[int] = wi[1].T
else:
_lowerCAmelCase : Optional[Any] = wi.T
_lowerCAmelCase : str = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_lowerCAmelCase : Dict = tax_relpos_bias_lookup(_A , _A , 'decoder' ).T
_lowerCAmelCase : Optional[int] = old['decoder/decoder_norm/scale']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_lowerCAmelCase : List[Any] = old['decoder/logits_dense/kernel'].T
return new
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : int = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_lowerCAmelCase : Union[str, Any] = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_lowerCAmelCase : int = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
_lowerCAmelCase : Any = state_dict['shared.weight']
return state_dict
def lowercase (_A , _A , _A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : Any = checkpoints.load_tax_checkpoint(_A )
_lowerCAmelCase : Tuple = convert_tax_to_pytorch(
_A , num_layers=config.num_layers , is_encoder_only=_A , scalable_attention=_A )
_lowerCAmelCase : List[Any] = make_state_dict(_A , _A )
model.load_state_dict(_A , strict=_A )
def lowercase (_A , _A , _A , _A = False , _A = False , ):
"""simple docstring"""
_lowerCAmelCase : Tuple = MTaConfig.from_json_file(_A )
print(f'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_lowerCAmelCase : Tuple = UMTaEncoderModel(_A )
else:
_lowerCAmelCase : Any = UMTaForConditionalGeneration(_A )
# Load weights from tf checkpoint
load_tax_weights_in_ta(_A , _A , _A , _A , _A )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(_A )
# Verify that we can load the checkpoint.
model.from_pretrained(_A )
print('Done' )
if __name__ == "__main__":
lowerCAmelCase : List[str] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
lowerCAmelCase : Any = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 444 |
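The converter above addresses every weight through a flattened parameter tree with '/'-joined keys, as produced by `flax.traverse_util.flatten_dict`. A minimal pure-Python sketch of that flattening, run on a made-up nested dict:

def flatten(tree, prefix=""):
    flat = {}
    for key, value in tree.items():
        path = f"{prefix}/{key}" if prefix else key
        if isinstance(value, dict):
            flat.update(flatten(value, path))
        else:
            flat[path] = value
    return flat

params = {"encoder": {"mlp": {"wi": [1, 2]}, "norm": {"scale": [3]}}}
print(flatten(params))  # {'encoder/mlp/wi': [1, 2], 'encoder/norm/scale': [3]}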
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase : Any = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 444 | 1 |
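The import file above follows the lazy-module pattern: heavy submodules are only imported when one of their symbols is first accessed. A minimal sketch of the same idea using PEP 562 module-level `__getattr__`; the package layout and the symbol-to-submodule mapping are illustrative assumptions, not the `_LazyModule` implementation itself:

# mypackage/__init__.py  (layout and names are made up)
import importlib

_LAZY = {
    "CanineConfig": ".configuration_canine",
    "CanineModel": ".modeling_canine",
}

def __getattr__(name):
    if name in _LAZY:
        # resolve the submodule relative to this package on first access
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")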
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 710 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class Data2VecTextConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "data2vec-text"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ])
| 176 | 0 |
"""simple docstring"""
import functools
def mincost_tickets(days , costs ):
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("""The parameter days should be a list of integers""" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("""The parameter costs should be a list of three integers""" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("""All days elements should be greater than 0""" )
    if max(days ) >= 366:
        raise ValueError("""All days elements should be less than 366""" )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 |
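For comparison with the memoised recursion above, a bottom-up sketch of the same ticket-cost recurrence; the function name and the example values are illustrative:

def mincost_tickets_iterative(days, costs):
    if not days:
        return 0
    travel = set(days)
    last = max(days)
    dp = [0] * (last + 1)
    for day in range(1, last + 1):
        if day not in travel:
            dp[day] = dp[day - 1]        # no travel: carry cost forward
        else:
            dp[day] = min(
                dp[day - 1] + costs[0],           # 1-day pass
                dp[max(0, day - 7)] + costs[1],   # 7-day pass
                dp[max(0, day - 30)] + costs[2],  # 30-day pass
            )
    return dp[last]

print(mincost_tickets_iterative([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11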
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = ShapEImgaImgPipeline
_UpperCAmelCase = ["image"]
_UpperCAmelCase = ["image"]
_UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCAmelCase = False
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 32
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 32
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 8
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : str = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,)
_lowerCAmelCase : Dict = CLIPVisionModel(_A )
return model
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = CLIPImageProcessor(
crop_size=224 ,do_center_crop=_A ,do_normalize=_A ,do_resize=_A ,image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] ,image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] ,resample=3 ,size=224 ,)
return image_processor
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
_lowerCAmelCase : int = PriorTransformer(**_A )
return model
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : List[str] = ShapERenderer(**_A )
return model
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.dummy_prior
_lowerCAmelCase : Union[str, Any] = self.dummy_image_encoder
_lowerCAmelCase : List[Any] = self.dummy_image_processor
_lowerCAmelCase : List[str] = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule='exp' ,num_train_timesteps=1024 ,prediction_type='sample' ,use_karras_sigmas=_A ,clip_sample=_A ,clip_sample_range=1.0 ,)
_lowerCAmelCase : List[str] = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith('mps' ):
_lowerCAmelCase : Dict = torch.manual_seed(_A )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = 'cpu'
_lowerCAmelCase : List[str] = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**_A )
_lowerCAmelCase : Tuple = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(_A ) )
_lowerCAmelCase : Optional[int] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = torch_device == 'cpu'
_lowerCAmelCase : List[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=_A ,relax_max_difference=_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_dummy_components()
_lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_A )
_lowerCAmelCase : List[Any] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Tuple = 1
_lowerCAmelCase : str = 2
_lowerCAmelCase : Any = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**_A ,num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
_lowerCAmelCase : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
_lowerCAmelCase : Optional[int] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
_lowerCAmelCase : Union[str, Any] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=_A ).manual_seed(0 )
_lowerCAmelCase : Tuple = pipe(
_A ,generator=_A ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type='np' ,).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A ,_A )
| 259 | 0 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( lowercase ):
lowerCamelCase_ : Union[str, Any] = """EncodecFeatureExtractor"""
lowerCamelCase_ : Any = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any]):
super().__init__(UpperCAmelCase , UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Dict = self.feature_extractor
SCREAMING_SNAKE_CASE_ :Tuple = False
def _snake_case ( self : List[Any] , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Tuple=True):
return self.tokenizer.get_decoder_prompt_ids(task=UpperCAmelCase , language=UpperCAmelCase , no_timestamps=UpperCAmelCase)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs['input_values'] = audio_inputs['input_values']
            if 'padding_mask' in audio_inputs:
                inputs['padding_mask'] = audio_inputs['padding_mask']
            return inputs
    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop('audio', None)
        padding_mask = kwargs.pop('padding_mask', None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), 'constant', constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
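
# Illustrative usage of the processor above; the checkpoint name and the 32 kHz
# sampling rate are assumptions for the sketch, not values taken from this file.
def _example_musicgen_processing():
    processor = MusicgenProcessor.from_pretrained('facebook/musicgen-small')
    audio = np.zeros(32000, dtype=np.float32)  # one second of silence
    inputs = processor(text=['80s pop with synths'], audio=audio, sampling_rate=32000, return_tensors='pt')
    print(sorted(inputs.keys()))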
| 140 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
        "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AlbertForMaskedLM",
        "AlbertForMultipleChoice",
        "AlbertForPreTraining",
        "AlbertForQuestionAnswering",
        "AlbertForSequenceClassification",
        "AlbertForTokenClassification",
        "AlbertModel",
        "AlbertPreTrainedModel",
        "load_tf_weights_in_albert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
        "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAlbertForMaskedLM",
        "TFAlbertForMultipleChoice",
        "TFAlbertForPreTraining",
        "TFAlbertForQuestionAnswering",
        "TFAlbertForSequenceClassification",
        "TFAlbertForTokenClassification",
        "TFAlbertMainLayer",
        "TFAlbertModel",
        "TFAlbertPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
        "FlaxAlbertForMaskedLM",
        "FlaxAlbertForMultipleChoice",
        "FlaxAlbertForPreTraining",
        "FlaxAlbertForQuestionAnswering",
        "FlaxAlbertForSequenceClassification",
        "FlaxAlbertForTokenClassification",
        "FlaxAlbertModel",
        "FlaxAlbertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
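
# With `_import_structure` registered through `_LazyModule` above, names are
# only imported on first attribute access. A standalone sketch of that
# behavior (the lookup below goes through `_LazyModule.__getattr__`):
def _demo_lazy_attribute_access():
    import importlib

    pkg = importlib.import_module("transformers.models.albert")
    return getattr(pkg, "AlbertConfig")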
| 140 | 1 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-classification')
        self.tool.setup()
        self.remote_tool = load_tool('text-classification', remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')
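
# The same tool can be exercised outside the test harness; the input strings
# mirror the fixtures above, and `load_tool` is assumed to be available from a
# transformers install with agents support.
def _example_text_classification():
    tool = load_tool('text-classification')
    tool.setup()
    print(tool("That's quite cool", ['positive', 'negative']))  # expected: positive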
| 107 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase : Tuple = logging.get_logger(__name__)
class DeiTImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PIL.Image.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(image, size=(size['height'], size['width']), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )

        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')

        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
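
# Quick usage sketch for the processor above on a random RGB array; the input
# dimensions are arbitrary, and the defaults resize to 256x256 then
# center-crop to 224x224.
def _example_preprocess():
    image = (np.random.rand(300, 400, 3) * 255).astype(np.uint8)
    processor = DeiTImageProcessor()
    batch = processor.preprocess(image, return_tensors='np')
    print(batch['pixel_values'].shape)  # (1, 3, 224, 224)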
| 542 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = 'AutoTokenizer'
    attributes = ['tokenizer']

    preset_shape = {
        'semantic_prompt': 1,
        'coarse_prompt': 2,
        'fine_prompt': 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path='speaker_embeddings_path.json', **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop('subfolder', None), cache_dir=kwargs.pop('cache_dir', None),
                force_download=kwargs.pop('force_download', False), proxies=kwargs.pop('proxies', None),
                resume_download=kwargs.pop('resume_download', False), local_files_only=kwargs.pop('local_files_only', False),
                use_auth_token=kwargs.pop('use_auth_token', None), revision=kwargs.pop('revision', None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f'`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist\n'
                    'no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n'
                    'dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.'
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
    def save_pretrained(self, save_directory, speaker_embeddings_dict_path='speaker_embeddings_path.json', speaker_embeddings_directory='speaker_embeddings', push_to_hub: bool = False, **kwargs):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, 'v2'), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict['repo_or_path'] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != 'repo_or_path':
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(embeddings_dict['repo_or_path'], speaker_embeddings_directory, f'{prompt_key}_{key}'),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f'{prompt_key}_{key}.npy')

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), 'w') as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)
    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ['semantic_prompt', 'coarse_prompt', 'fine_prompt']:
            if key not in voice_preset_paths:
                raise ValueError(
                    f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].'
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path', '/'),
                voice_preset_paths[key],
                subfolder=kwargs.pop('subfolder', None), cache_dir=kwargs.pop('cache_dir', None),
                force_download=kwargs.pop('force_download', False), proxies=kwargs.pop('proxies', None),
                resume_download=kwargs.pop('resume_download', False), local_files_only=kwargs.pop('local_files_only', False),
                use_auth_token=kwargs.pop('use_auth_token', None), revision=kwargs.pop('revision', None),
            )
            if path is None:
                raise ValueError(
                    f'`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exist\n'
                    f'no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n'
                    'embeddings.'
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None, **kwargs):
        for key in ['semantic_prompt', 'coarse_prompt', 'fine_prompt']:
            if key not in voice_preset:
                raise ValueError(f'Voice preset unrecognized, missing {key} as a key.')

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.')

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.')
    def __call__(self, text=None, voice_preset=None, return_tensors='pt', max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith('.npz'):
                    voice_preset = voice_preset + '.npz'
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding='max_length',
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text['history_prompt'] = voice_preset

        return encoded_text
| 714 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'\s+')
def get_hash(example):
    """simple docstring"""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
    """simple docstring"""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """simple docstring"""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def is_autogenerated(example, scan_width=5):
    """simple docstring"""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """simple docstring"""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """simple docstring"""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """simple docstring"""
    lines = example["content"].splitlines()
    counter = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio(example):
    """simple docstring"""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
return {"ratio": ratio}
def preprocess(example):
    """simple docstring"""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    """simple docstring"""
    if not check_uniques(example, uniques):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path):
    """simple docstring"""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
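
# Small sanity check for the whitespace-insensitive hashing above: two contents
# that differ only in whitespace should collide (the strings are illustrative).
def _demo_hash_collision():
    assert get_hash({'content': 'x = 1\n'}) == get_hash({'content': 'x=1'})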
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')

# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(f'''Size of deduplicate dataset: {len(ds_filter)}''')

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / 'duplicate_clusters.json', 'w') as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / 'data'
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f'''file-{file_number+1:012}.json''')
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
| 11 | 0 |
import requests
UpperCAmelCase = '''''' # <-- Put your OpenWeatherMap appid here!
UpperCAmelCase = '''https://api.openweathermap.org/data/2.5/'''
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE = "Chicago" , __SCREAMING_SNAKE_CASE = APPID ):
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE = "Kolkata, India" , __SCREAMING_SNAKE_CASE = APPID ):
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE = 55.68 , __SCREAMING_SNAKE_CASE = 12.57 , __SCREAMING_SNAKE_CASE = APPID ):
return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
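
# Example of reading a single field out of the JSON response; the city is
# arbitrary and a valid `APPID` must be set above for the request to succeed.
def _example_temperature():
    data = current_weather('Copenhagen')
    print(data.get('main', {}).get('temp'))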
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
| 84 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.floataa if hasattr(np, 'floataa') else np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode='trunc'),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin, x=self.x, y=self.y, z=self.z, width=width, height=height,
            x_fov=self.x_fov, y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
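
# Usage sketch for the helper above; a 64-pixel frame is an arbitrary choice.
def _example_camera_rays():
    cameras = create_pan_cameras(64)
    rays = cameras.camera_rays
    print(rays.shape)  # (1, 20 * 64 * 64, 2, 3): per-pixel origins and directions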
| 463 | 0 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
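
# Worked example for the greedy selection above; the menu values and weights
# are illustrative.
def _example_greedy():
    foods = build_menu(['burger', 'salad', 'coke'], [80, 60, 40], [40, 10, 20])
    chosen, total_value = greedy(foods, 60.0, Things.get_value)
    print(chosen, total_value)  # burger and salad fit; total value 140.0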
| 716 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 637_8137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """simple docstring"""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
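
# Illustrative call with two coordinate pairs (roughly San Francisco and
# Yosemite); the inputs are example values, not taken from this file.
def _example_distance():
    meters = lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)
    print(f'{meters / 1000:.1f} km')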
| 277 | 0 |
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , )
def __lowercase ( self : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
lowerCAmelCase = BioGptModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase )
lowerCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , ):
lowerCAmelCase = BioGptForCausalLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self : Any , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , *lowerCAmelCase : str ):
lowerCAmelCase = BioGptModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
# create attention mask
lowerCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase )
lowerCAmelCase = self.seq_length // 2
lowerCAmelCase = 0
# first forward pass
lowerCAmelCase , lowerCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
lowerCAmelCase = ids_tensor((1,) , lowerCAmelCase ).item() + 1
lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
lowerCAmelCase = random_other_next_tokens
# append to next input_ids and attn_mask
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCAmelCase )] , dim=1 , )
# get two different outputs
lowerCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase )["""last_hidden_state"""]
lowerCAmelCase = model(lowerCAmelCase , past_key_values=lowerCAmelCase , attention_mask=lowerCAmelCase )["""last_hidden_state"""]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
def __lowercase ( self : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : int , *lowerCAmelCase : List[str] ):
lowerCAmelCase = BioGptModel(config=lowerCAmelCase ).to(lowerCAmelCase ).eval()
lowerCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase )
# first forward pass
lowerCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase , use_cache=lowerCAmelCase )
lowerCAmelCase , lowerCAmelCase = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase )["""last_hidden_state"""]
lowerCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase )[
"""last_hidden_state"""
]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
def __lowercase ( self : Any , lowerCAmelCase : Tuple , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Any , *lowerCAmelCase : List[Any] , lowerCAmelCase : int=False ):
lowerCAmelCase = BioGptForCausalLM(lowerCAmelCase )
model.to(lowerCAmelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
lowerCAmelCase = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __lowercase ( self : Any , lowerCAmelCase : int , *lowerCAmelCase : Optional[int] ):
lowerCAmelCase = BioGptModel(lowerCAmelCase )
lowerCAmelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def __lowercase ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : str , *lowerCAmelCase : Tuple ):
lowerCAmelCase = self.num_labels
lowerCAmelCase = BioGptForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': BioGptModel,
            'text-classification': BioGptForSequenceClassification,
            'text-generation': BioGptForCausalLM,
            'token-classification': BioGptForTokenClassification,
            'zero-shot': BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
def __lowercase ( self : List[str] ):
self.config_tester.run_common_tests()
def __lowercase ( self : int ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def __lowercase ( self : List[str] ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*lowerCAmelCase )
def __lowercase ( self : str ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCAmelCase )
def __lowercase ( self : int ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowerCAmelCase , gradient_checkpointing=lowerCAmelCase )
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCAmelCase )
def __lowercase ( self : Tuple ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCAmelCase )
def __lowercase ( self : Tuple ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCAmelCase )
@slow
def __lowercase ( self : List[Any] ):
lowerCAmelCase = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(lowerCAmelCase )
lowerCAmelCase = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
lowerCAmelCase = """left"""
# Define PAD Token = EOS Token = 50256
lowerCAmelCase = tokenizer.eos_token
lowerCAmelCase = model.config.eos_token_id
# use different length sentences to test batching
lowerCAmelCase = [
"""Hello, my dog is a little""",
"""Today, I""",
]
lowerCAmelCase = tokenizer(lowerCAmelCase , return_tensors="""pt""" , padding=lowerCAmelCase )
lowerCAmelCase = inputs["""input_ids"""].to(lowerCAmelCase )
lowerCAmelCase = model.generate(
input_ids=lowerCAmelCase , attention_mask=inputs["""attention_mask"""].to(lowerCAmelCase ) , )
lowerCAmelCase = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(lowerCAmelCase )
lowerCAmelCase = model.generate(input_ids=lowerCAmelCase )
lowerCAmelCase = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
lowerCAmelCase = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(lowerCAmelCase )
lowerCAmelCase = model.generate(input_ids=lowerCAmelCase , max_length=model.config.max_length - num_paddings )
lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
lowerCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase )
lowerCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase )
lowerCAmelCase = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , [non_padded_sentence, padded_sentence] )
@slow
def __lowercase ( self : List[str] ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = BioGptModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def __lowercase ( self : Tuple ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = input_dict["""input_ids"""]
lowerCAmelCase = input_ids.ne(1 ).to(lowerCAmelCase )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = BioGptForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowercase ( self : Any ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = """multi_label_classification"""
lowerCAmelCase = input_dict["""input_ids"""]
lowerCAmelCase = input_ids.ne(1 ).to(lowerCAmelCase )
lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase = BioGptForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
@slow
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
lowerCAmelCase = torch.tensor([[2, 4805, 9, 656, 21]] )
lowerCAmelCase = model(lowerCAmelCase )[0]
lowerCAmelCase = 4_2384
lowerCAmelCase = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase )
lowerCAmelCase = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase , atol=1e-4 ) )
@slow
def __lowercase ( self : Optional[Any] ):
lowerCAmelCase = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
lowerCAmelCase = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(lowerCAmelCase )
torch.manual_seed(0 )
lowerCAmelCase = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(lowerCAmelCase )
lowerCAmelCase = model.generate(
**lowerCAmelCase , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=lowerCAmelCase , )
lowerCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase )
lowerCAmelCase = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
| 169 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_0_0  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
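
# Worked example with rough silicon-like concentrations in cm^-3 (illustrative
# numbers): V_bi = (kT/q) * ln(Nd * Na / ni^2), about 0.8 V at 300 K here.
def _example_builtin_voltage():
    print(f'{builtin_voltage(1e17, 1e17, 1.5e10):.3f} V')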
| 169 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    '''configuration_bridgetower''': [
        '''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''BridgeTowerConfig''',
        '''BridgeTowerTextConfig''',
        '''BridgeTowerVisionConfig''',
    ],
    '''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_bridgetower'''] = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bridgetower'''] = [
        '''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BridgeTowerForContrastiveLearning''',
        '''BridgeTowerForImageAndTextRetrieval''',
        '''BridgeTowerForMaskedLM''',
        '''BridgeTowerModel''',
        '''BridgeTowerPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 478 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_pegasus_x'''] = [
        '''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PegasusXForConditionalGeneration''',
        '''PegasusXModel''',
        '''PegasusXPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 478 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_timm_backbone"""] = ["""TimmBackbone"""]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""Pack a seq2seq dataset: greedily concatenate adjacent (source, target) pairs until `max_tokens` is reached."""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily merge consecutive (src, tgt) pairs while both stay under `max_tokens`."""
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
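# Example invocation (the script filename and data paths are assumptions for
# illustration; the flags match the argparse definition above):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --data_dir ./cnn_dm --save_path ./cnn_dm_packed --max_seq_len 1024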
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model


def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
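# Example invocation (the checkpoint/config paths are assumptions for
# illustration; the flags match the argparse definition above):
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_model/bert_model.ckpt \
#       --bert_config_file ./tf2_model/bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin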
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Computes per-question macro-F1, answer-level F1 and exact match for MultiRC predictions."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,codebase_urls=[] ,reference_urls=[] ,format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None ,)
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
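# Usage sketch (illustrative only): a concrete test case mixes this class into a
# pipeline test, supplies `pipeline_class`/`get_dummy_inputs`, and delegates to
# the helpers above. `IFPipeline` is the real diffusers class; the scaffolding
# below is an assumption for demonstration.
#
#   class IFPipelineFastTests(IFPipelineTesterMixin, unittest.TestCase):
#       pipeline_class = IFPipeline
#
#       def get_dummy_components(self):
#           return self._get_dummy_components()
#
#       def test_save_load_optional_components(self):
#           self._test_save_load_optional_components()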
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (sources) into a single dataset, alternating between the sources."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Converts a list of Dataset (or IterableDataset) objects with the same schema into a single dataset."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
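# Usage sketch (illustrative only): interleaving two small in-memory datasets.
# Assumes the installed `datasets` package; guarded so it never runs on import.
if __name__ == "__main__":
    from datasets import Dataset, interleave_datasets

    d1 = Dataset.from_dict({"a": [0, 1, 2]})
    d2 = Dataset.from_dict({"a": [10, 11, 12]})
    mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
    print(mixed["a"])  # rows drawn from d1 and d2 according to the sampling probabilities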
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            # hash the file in 1 MiB chunks to keep memory usage flat
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check whether `dataset_size` is small enough to fit in memory (below `config.IN_MEMORY_MAX_SIZE`)."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
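# Usage sketch (illustrative only): verifying recorded checksums against
# expected ones. The dicts below are made-up values for demonstration.
if __name__ == "__main__":
    expected = {"https://example.com/data.csv": {"num_bytes": 10, "checksum": "abc"}}
    recorded = {"https://example.com/data.csv": {"num_bytes": 10, "checksum": "abc"}}
    verify_checksums(expected, recorded, verification_name="dataset source files")
    # raises NonMatchingChecksumError if any checksum entry differs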
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
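# Run sketch (the module path is an assumption for illustration):
#   python -m pytest tests/test_optimizer.py -q
# Pickling support matters because users may serialize the wrapped optimizer
# (e.g. via `pickle`/`torch.save`) when checkpointing, so the accelerate
# wrapper must round-trip cleanly.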
"""Tokenization classes for PhoBERT."""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    """Construct a PhoBERT tokenizer, using Byte-Pair Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        # fairseq-style special-token ids
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
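# Usage sketch (illustrative only): loading the released PhoBERT checkpoint and
# tokenizing word-segmented Vietnamese text (PhoBERT expects pre-segmented
# input, with multi-word tokens joined by underscores). Assumes Hub access.
if __name__ == "__main__":
    tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
    ids = tokenizer("Chúng_tôi là những nghiên_cứu_viên .")["input_ids"]
    print(ids)
    print(tokenizer.convert_ids_to_tokens(ids))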
"""
Project Euler problem 493: https://projecteuler.net/problem=493

70 coloured balls are placed in an urn, 10 for each of the seven rainbow
colours. What is the expected number of distinct colours in 20 randomly
picked balls? Give your answer with nine digits after the decimal point.
"""
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Return the expected number of distinct colours among `num_picked` balls."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
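# Derivation sketch (follows from linearity of expectation): for one fixed
# colour, P(colour absent among the 20 picks) = C(60, 20) / C(70, 20), so
#   E[#distinct colours] = 7 * (1 - C(60, 20) / C(70, 20)),
# which is exactly what `solution` computes. A quick cross-check by direct
# simulation (hypothetical snippet, not part of the original file):
#
#   import random
#   balls = [c for c in range(7) for _ in range(10)]
#   est = sum(len(set(random.sample(balls, 20))) for _ in range(100_000)) / 100_000
#   # est should be close to float(solution(20))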
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """Thin wrapper over a DeepSpeed config (dict, file path, or base64 string) that supports simple queries."""

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload


class DeepSpeedEngineWrapper:
    """Internal wrapper around a deepspeed engine that drives backward + step together."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    """Internal wrapper around a deepspeed optimizer."""

    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    """Internal wrapper around a deepspeed scheduler."""

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """Dummy optimizer placeholder, used when the optimizer is defined in the DeepSpeed config file."""

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """Dummy scheduler placeholder, used when the scheduler is defined in the DeepSpeed config file."""

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
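# Usage sketch (illustrative only): querying a DeepSpeed config. The dict is a
# minimal made-up ZeRO-3 config for demonstration.
if __name__ == "__main__":
    ds_config = {
        "zero_optimization": {
            "stage": 3,
            "offload_param": {"device": "cpu"},
        }
    }
    hf_ds_config = HfDeepSpeedConfig(ds_config)
    print(hf_ds_config.is_zero3())  # True
    print(hf_ds_config.is_offload())  # True
    print(hf_ds_config.get_value("zero_optimization.stage"))  # 3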
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class snake_case ( __lowercase , __lowercase ):
UpperCAmelCase__ = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase__ = 2
@register_to_config
def __init__(self , SCREAMING_SNAKE_CASE_ = 10_00 , SCREAMING_SNAKE_CASE_ = 0.0_00_85 , SCREAMING_SNAKE_CASE_ = 0.0_12 , SCREAMING_SNAKE_CASE_ = "linear" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "epsilon" , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = "linspace" , SCREAMING_SNAKE_CASE_ = 0 , ):
"""simple docstring"""
if trained_betas is not None:
SCREAMING_SNAKE_CASE_ = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
elif beta_schedule == "linear":
SCREAMING_SNAKE_CASE_ = torch.linspace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''exp''' )
else:
raise NotImplementedError(f'{beta_schedule} does is not implemented for {self.__class__}' )
SCREAMING_SNAKE_CASE_ = 1.0 - self.betas
SCREAMING_SNAKE_CASE_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = use_karras_sigmas
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
if schedule_timesteps is None:
SCREAMING_SNAKE_CASE_ = self.timesteps
SCREAMING_SNAKE_CASE_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
SCREAMING_SNAKE_CASE_ = 1 if len(SCREAMING_SNAKE_CASE_ ) > 1 else 0
else:
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
SCREAMING_SNAKE_CASE_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowercase (self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = num_inference_steps
SCREAMING_SNAKE_CASE_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
SCREAMING_SNAKE_CASE_ = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
SCREAMING_SNAKE_CASE_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(0 , SCREAMING_SNAKE_CASE_ ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
SCREAMING_SNAKE_CASE_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(SCREAMING_SNAKE_CASE_ , 0 , -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
SCREAMING_SNAKE_CASE_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.interp(SCREAMING_SNAKE_CASE_ , np.arange(0 , len(SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ )
if self.config.use_karras_sigmas:
SCREAMING_SNAKE_CASE_ = self._convert_to_karras(in_sigmas=SCREAMING_SNAKE_CASE_ , num_inference_steps=self.num_inference_steps )
SCREAMING_SNAKE_CASE_ = np.array([self._sigma_to_t(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for sigma in sigmas] )
SCREAMING_SNAKE_CASE_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ = timesteps.to(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ = timesteps.to(device=SCREAMING_SNAKE_CASE_ )
# empty dt and derivative
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
SCREAMING_SNAKE_CASE_ = defaultdict(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
# get distribution
SCREAMING_SNAKE_CASE_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
SCREAMING_SNAKE_CASE_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
SCREAMING_SNAKE_CASE_ = low_idx + 1
SCREAMING_SNAKE_CASE_ = log_sigmas[low_idx]
SCREAMING_SNAKE_CASE_ = log_sigmas[high_idx]
# interpolate sigmas
SCREAMING_SNAKE_CASE_ = (low - log_sigma) / (low - high)
SCREAMING_SNAKE_CASE_ = np.clip(SCREAMING_SNAKE_CASE_ , 0 , 1 )
# transform interpolation to time range
SCREAMING_SNAKE_CASE_ = (1 - w) * low_idx + w * high_idx
SCREAMING_SNAKE_CASE_ = t.reshape(sigma.shape )
return t
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = in_sigmas[-1].item()
SCREAMING_SNAKE_CASE_ = in_sigmas[0].item()
SCREAMING_SNAKE_CASE_ = 7.0 # 7.0 is the value used in the paper
SCREAMING_SNAKE_CASE_ = np.linspace(0 , 1 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = sigma_min ** (1 / rho)
SCREAMING_SNAKE_CASE_ = sigma_max ** (1 / rho)
SCREAMING_SNAKE_CASE_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowercase (self ):
"""simple docstring"""
return self.dt is None
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
# advance index counter by 1
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index - 1]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE_ = model_output
else:
raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`' )
if self.config.clip_sample:
SCREAMING_SNAKE_CASE_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
SCREAMING_SNAKE_CASE_ = sigma_next - sigma_hat
# store for 2nd order step
SCREAMING_SNAKE_CASE_ = derivative
SCREAMING_SNAKE_CASE_ = dt
SCREAMING_SNAKE_CASE_ = sample
else:
# 2. 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_next
SCREAMING_SNAKE_CASE_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
SCREAMING_SNAKE_CASE_ = self.dt
SCREAMING_SNAKE_CASE_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
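    # Heun update in a nutshell (a sketch of the two-phase logic above):
    #   1st call: d1 = (x - x0_pred) / sigma_hat ; cache d1, dt, x ; return x + d1 * dt
    #   2nd call: d2 = (x' - x0_pred') / sigma_next ; return x_cached + dt * (d1 + d2) / 2
    # i.e. an Euler predictor followed by a trapezoidal-rule corrector.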
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE_ ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device , dtype=torch.float32 )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = [self.index_for_timestep(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for t in timesteps]
SCREAMING_SNAKE_CASE_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
SCREAMING_SNAKE_CASE_ = sigma.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_ = original_samples + noise * sigma
return noisy_samples
def __len__(self ):
"""simple docstring"""
return self.config.num_train_timesteps | 628 | 0 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel( resistors : list[float] ):
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
def resistor_series( resistors : list[float] ):
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F'''Resistor at index {index} has a negative value!'''
            raise ValueError(msg )
        index += 1
    return sum_r
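# Quick sanity checks for the two helpers above (values chosen to be exact):
#   resistor_parallel([10.0, 10.0]) -> 5.0, since 1 / (1/10 + 1/10) == 5
#   resistor_series([10.0, 10.0]) -> 20.0, since 10 + 10 == 20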
if __name__ == "__main__":
import doctest
doctest.testmod()
| 134 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[str] = tempfile.mkdtemp()
snake_case : List[str] = SamImageProcessor()
snake_case : List[Any] = SamProcessor(SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ).image_processor
def lowerCamelCase_ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
snake_case : Tuple = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
snake_case : List[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = self.get_image_processor()
snake_case : Tuple = SamProcessor(image_processor=SCREAMING_SNAKE_CASE )
snake_case : List[str] = self.prepare_image_inputs()
snake_case : Optional[int] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" )
snake_case : Union[str, Any] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Dict = self.get_image_processor()
snake_case : int = SamProcessor(image_processor=SCREAMING_SNAKE_CASE )
snake_case : Dict = [torch.ones((1, 3, 5, 5) )]
snake_case : Optional[Any] = [[1_764, 2_646]]
snake_case : List[Any] = [[683, 1_024]]
snake_case : int = processor.post_process_masks(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
snake_case : Tuple = processor.post_process_masks(
SCREAMING_SNAKE_CASE , torch.tensor(SCREAMING_SNAKE_CASE ) , torch.tensor(SCREAMING_SNAKE_CASE ) )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
# should also work with np
snake_case : Any = [np.ones((1, 3, 5, 5) )]
snake_case : Optional[int] = processor.post_process_masks(SCREAMING_SNAKE_CASE , np.array(SCREAMING_SNAKE_CASE ) , np.array(SCREAMING_SNAKE_CASE ) )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
snake_case : Union[str, Any] = [[1, 0], [0, 1]]
with self.assertRaises(SCREAMING_SNAKE_CASE ):
snake_case : Tuple = processor.post_process_masks(SCREAMING_SNAKE_CASE , np.array(SCREAMING_SNAKE_CASE ) , np.array(SCREAMING_SNAKE_CASE ) )
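    # Note on the failure case above: a plain 2x2 Python list is not a valid
    # batch of mask logits, so post_process_masks is expected to raise when
    # asked to resize it against the given original sizes.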
@require_vision
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Optional[Any] = tempfile.mkdtemp()
snake_case : Any = SamImageProcessor()
snake_case : List[str] = SamProcessor(SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ).image_processor
def lowerCamelCase_ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
snake_case : Tuple = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Optional[int] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case : List[str] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
snake_case : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Union[str, Any] = self.get_image_processor()
snake_case : Union[str, Any] = SamProcessor(image_processor=SCREAMING_SNAKE_CASE )
snake_case : str = self.prepare_image_inputs()
snake_case : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" )
snake_case : List[str] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[str] = self.get_image_processor()
snake_case : List[str] = SamProcessor(image_processor=SCREAMING_SNAKE_CASE )
snake_case : Any = [tf.ones((1, 3, 5, 5) )]
snake_case : Dict = [[1_764, 2_646]]
snake_case : Optional[Any] = [[683, 1_024]]
snake_case : int = processor.post_process_masks(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
snake_case : str = processor.post_process_masks(
SCREAMING_SNAKE_CASE , tf.convert_to_tensor(SCREAMING_SNAKE_CASE ) , tf.convert_to_tensor(SCREAMING_SNAKE_CASE ) , return_tensors="tf" , )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
# should also work with np
snake_case : str = [np.ones((1, 3, 5, 5) )]
snake_case : Any = processor.post_process_masks(
SCREAMING_SNAKE_CASE , np.array(SCREAMING_SNAKE_CASE ) , np.array(SCREAMING_SNAKE_CASE ) , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
snake_case : List[str] = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
snake_case : Optional[Any] = processor.post_process_masks(
SCREAMING_SNAKE_CASE , np.array(SCREAMING_SNAKE_CASE ) , np.array(SCREAMING_SNAKE_CASE ) , return_tensors="tf" )
@require_vision
@require_torchvision
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Dict = tempfile.mkdtemp()
snake_case : str = SamImageProcessor()
snake_case : Dict = SamProcessor(SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ).image_processor
def lowerCamelCase_ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
snake_case : Dict = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Dict = self.get_image_processor()
snake_case : List[str] = SamProcessor(image_processor=SCREAMING_SNAKE_CASE )
snake_case : Optional[int] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )
snake_case : str = [tf.convert_to_tensor(SCREAMING_SNAKE_CASE )]
snake_case : str = [torch.tensor(SCREAMING_SNAKE_CASE )]
snake_case : int = [[1_764, 2_646]]
snake_case : List[Any] = [[683, 1_024]]
snake_case : Any = processor.post_process_masks(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_tensors="tf" )
snake_case : Optional[int] = processor.post_process_masks(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_tensors="pt" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Any = self.get_image_processor()
snake_case : Optional[int] = SamProcessor(image_processor=SCREAMING_SNAKE_CASE )
snake_case : Any = self.prepare_image_inputs()
snake_case : Dict = image_processor(SCREAMING_SNAKE_CASE , return_tensors="pt" )["pixel_values"].numpy()
snake_case : Dict = processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" )["pixel_values"].numpy()
snake_case : List[str] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="tf" )["pixel_values"].numpy()
snake_case : List[str] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" )["pixel_values"].numpy()
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
| 134 | 1 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang ) -> None:
    """simple docstring"""
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
__snake_case : int = F"""{src_lang}-{tgt_lang}"""
__snake_case : List[str] = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , 'README.md' )
    print(F"""Generating {path}""" )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    model_type , src_lang , tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
'''simple docstring'''
def odd_even_sort( input_list : list ) -> list:
    """simple docstring"""
    is_sorted = False
    while is_sorted is False: # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ): # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ): # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
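# Worked example for odd_even_sort([5, 4, 3, 2, 1]):
#   even pass: swap (5,4) and (3,2)  -> [4, 5, 2, 3, 1]
#   odd pass:  swap (5,2) and (3,1)  -> [4, 2, 5, 1, 3]
#   passes keep alternating until a full sweep makes no swap, ending at
#   [1, 2, 3, 4, 5].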
if __name__ == "__main__":
print('''Enter list to be sorted''')
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list) | 61 | 1 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit = 1000000 , n_limit = 10 ):
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= 10 )
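# The counting above solves Project Euler 174: a square lamina of t tiles
# satisfies t = outer**2 - hole**2 with outer and hole of equal parity, so
# count[t] is the number of distinct laminae using exactly t tiles, and the
# answer is how many t <= t_limit can be formed in 1 to 10 ways.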
if __name__ == "__main__":
print(f"""{solution() = }""") | 39 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : int ="Speech2TextFeatureExtractor"
a : int ="Speech2TextTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
super().__init__(snake_case__ , snake_case__ )
lowerCAmelCase : Any = self.feature_extractor
lowerCAmelCase : str = False
def __call__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*snake_case__ , **snake_case__ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
lowerCAmelCase : Any = kwargs.pop("raw_speech" )
else:
lowerCAmelCase : Optional[int] = kwargs.pop("audio" , snake_case__ )
lowerCAmelCase : Union[str, Any] = kwargs.pop("sampling_rate" , snake_case__ )
lowerCAmelCase : str = kwargs.pop("text" , snake_case__ )
if len(snake_case__ ) > 0:
lowerCAmelCase : int = args[0]
lowerCAmelCase : List[Any] = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
lowerCAmelCase : Dict = self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
if text is not None:
lowerCAmelCase : int = self.tokenizer(snake_case__ , **snake_case__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCAmelCase : Dict = encodings["input_ids"]
return inputs
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@contextmanager
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
lowerCAmelCase : List[str] = True
lowerCAmelCase : Any = self.tokenizer
yield
lowerCAmelCase : Optional[Any] = self.feature_extractor
lowerCAmelCase : Dict = False
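# Minimal usage sketch for the processor above (illustrative only; assumes a
# pretrained checkpoint such as "facebook/s2t-small-librispeech-asr" and a
# 16 kHz float waveform):
#
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="a transcript", return_tensors="pt").input_ids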
| 645 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = LxmertConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
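# Example invocation (all paths are hypothetical):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/lxmert/model.ckpt \
#     --config_file /path/to/lxmert/config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin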
| 719 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Tuple = "▁"
A_ : int = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
A_ : Dict = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
A_ : Dict = {
"facebook/m2m100_418M": 1_024,
}
# fmt: off
A_ : Any = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class lowerCamelCase (A__ ):
lowerCamelCase__ : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Tuple = ['input_ids', 'attention_mask']
lowerCamelCase__ : List[int] = []
lowerCamelCase__ : List[int] = []
def __init__( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : int="<s>" , __UpperCAmelCase : Optional[int]="</s>" , __UpperCAmelCase : Any="</s>" , __UpperCAmelCase : Any="<pad>" , __UpperCAmelCase : str="<unk>" , __UpperCAmelCase : Dict="m2m100" , __UpperCAmelCase : Optional[Dict[str, Any]] = None , __UpperCAmelCase : str=8 , **__UpperCAmelCase : str , ) -> None:
SCREAMING_SNAKE_CASE__ = {} if sp_model_kwargs is None else sp_model_kwargs
SCREAMING_SNAKE_CASE__ = language_codes
SCREAMING_SNAKE_CASE__ = FAIRSEQ_LANGUAGE_CODES[language_codes]
SCREAMING_SNAKE_CASE__ = {lang_code: F"""__{lang_code}__""" for lang_code in fairseq_language_code}
SCREAMING_SNAKE_CASE__ = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__UpperCAmelCase )
for lang_code in fairseq_language_code
if self.get_lang_token(__UpperCAmelCase ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , language_codes=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__UpperCAmelCase , **__UpperCAmelCase , )
SCREAMING_SNAKE_CASE__ = vocab_file
SCREAMING_SNAKE_CASE__ = load_json(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE__ = spm_file
SCREAMING_SNAKE_CASE__ = load_spm(__UpperCAmelCase , self.sp_model_kwargs )
SCREAMING_SNAKE_CASE__ = len(self.encoder )
SCREAMING_SNAKE_CASE__ = {
self.get_lang_token(__UpperCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(__UpperCAmelCase )
}
SCREAMING_SNAKE_CASE__ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__UpperCAmelCase )}
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.lang_token_to_id.items()}
SCREAMING_SNAKE_CASE__ = src_lang if src_lang is not None else """en"""
SCREAMING_SNAKE_CASE__ = tgt_lang
SCREAMING_SNAKE_CASE__ = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
SCREAMING_SNAKE_CASE__ = num_madeup_words
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : str ) -> None:
SCREAMING_SNAKE_CASE__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : str ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : Tuple ) -> Tuple:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__UpperCAmelCase , self.encoder[self.unk_token] )
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : int ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__UpperCAmelCase , self.unk_token )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
SCREAMING_SNAKE_CASE__ = []
else:
current_sub_tokens.append(__UpperCAmelCase )
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = [1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(__UpperCAmelCase )) + ([0] * len(__UpperCAmelCase )) + suffix_ones
def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ = None
return state
def __setstate__( self : Union[str, Any] , __UpperCAmelCase : Dict ) -> None:
SCREAMING_SNAKE_CASE__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = load_spm(self.spm_file , self.sp_model_kwargs )
def SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ = Path(__UpperCAmelCase )
if not save_dir.is_dir():
raise OSError(F"""{save_directory} should be a directory""" )
SCREAMING_SNAKE_CASE__ = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
SCREAMING_SNAKE_CASE__ = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , __UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
SCREAMING_SNAKE_CASE__ = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (str(__UpperCAmelCase ), str(__UpperCAmelCase ))
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : str = "en" , __UpperCAmelCase : Optional[List[str]] = None , __UpperCAmelCase : str = "ro" , **__UpperCAmelCase : str , ) -> BatchEncoding:
SCREAMING_SNAKE_CASE__ = src_lang
SCREAMING_SNAKE_CASE__ = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[str] , __UpperCAmelCase : Optional[str] , **__UpperCAmelCase : Tuple ) -> str:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
SCREAMING_SNAKE_CASE__ = src_lang
SCREAMING_SNAKE_CASE__ = self(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , **__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.get_lang_id(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : str ) -> None:
SCREAMING_SNAKE_CASE__ = self.get_lang_token(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.lang_token_to_id[lang_token]
SCREAMING_SNAKE_CASE__ = [self.cur_lang_id]
SCREAMING_SNAKE_CASE__ = [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : str ) -> None:
SCREAMING_SNAKE_CASE__ = self.get_lang_token(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.lang_token_to_id[lang_token]
SCREAMING_SNAKE_CASE__ = [self.cur_lang_id]
SCREAMING_SNAKE_CASE__ = [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : str ) -> str:
return self.lang_code_to_token[lang]
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : str ) -> int:
SCREAMING_SNAKE_CASE__ = self.get_lang_token(__UpperCAmelCase )
return self.lang_token_to_id[lang_token]
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = sentencepiece.SentencePieceProcessor(**snake_case__ )
spm.Load(str(snake_case__ ) )
return spm
def A ( snake_case__ ):
'''simple docstring'''
with open(snake_case__ , """r""" ) as f:
return json.load(snake_case__ )
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
with open(snake_case__ , """w""" ) as f:
json.dump(snake_case__ , snake_case__ , indent=2 )
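# Translation usage sketch (the checkpoint name matches the vocab maps above;
# the input text and language pair are illustrative):
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   model_inputs = tokenizer("Hello world", return_tensors="pt")
#   # generation is then forced to start with tokenizer.get_lang_id("fr")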
| 616 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
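# The try/except pattern above mirrors diffusers' lazy-import convention:
# when torch/transformers are unavailable, dummy placeholder objects are
# exported instead, so importing the package never fails outright; only
# actually instantiating a pipeline raises a helpful error.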
| 693 |
import argparse
import json
from tqdm import tqdm
def main() -> None:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path" , type=str , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
    parser.add_argument(
        "--evaluation_set" , type=str , help="where to store parsed evaluation_set file" , )
    parser.add_argument(
        "--gold_data_path" , type=str , help="where to store parsed gold_data_path file" , )
    args = parser.parse_args()
    with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
        args.gold_data_path , "w" ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n" )
            gold_file.write("\t".join(contexts ) + "\n" )
if __name__ == "__main__":
main()
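# Example invocation (file names are hypothetical):
#   python parse_dpr_relevance_data.py \
#     --src_path biencoder-nq-dev.json \
#     --evaluation_set nq_dev.questions \
#     --gold_data_path nq_dev.gold_titles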
| 693 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowerCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB ,keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self ):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB ,keep_accents=True )
_lowercase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCAmelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,)
_lowercase : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
_lowercase : Optional[int] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] ,)
_lowercase : str = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
def lowerCamelCase__ ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowercase : Dict = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Optional[Any] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Tuple = tempfile.mkdtemp()
_lowercase : Tuple = tokenizer_r.save_pretrained(UpperCAmelCase_ )
_lowercase : int = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
_lowercase : Any = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
# Checks everything loads correctly in the same way
_lowercase : int = tokenizer_r.from_pretrained(UpperCAmelCase_ )
_lowercase : Union[str, Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ ,UpperCAmelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCAmelCase_ )
# Save tokenizer rust, legacy_format=True
_lowercase : Optional[Any] = tempfile.mkdtemp()
_lowercase : List[Any] = tokenizer_r.save_pretrained(UpperCAmelCase_ ,legacy_format=UpperCAmelCase_ )
_lowercase : Optional[Any] = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
# Checks everything loads correctly in the same way
_lowercase : List[str] = tokenizer_r.from_pretrained(UpperCAmelCase_ )
_lowercase : Optional[Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ ,UpperCAmelCase_ ) )
shutil.rmtree(UpperCAmelCase_ )
# Save tokenizer rust, legacy_format=False
_lowercase : Optional[Any] = tempfile.mkdtemp()
_lowercase : Any = tokenizer_r.save_pretrained(UpperCAmelCase_ ,legacy_format=UpperCAmelCase_ )
_lowercase : Dict = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowercase : Optional[Any] = tokenizer_r.from_pretrained(UpperCAmelCase_ )
_lowercase : Any = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ ,UpperCAmelCase_ ) )
shutil.rmtree(UpperCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "facebook/mbart-large-en-ro"
SCREAMING_SNAKE_CASE_ : Dict = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
SCREAMING_SNAKE_CASE_ : Any = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
SCREAMING_SNAKE_CASE_ : Optional[int] = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def lowerCamelCase__ ( cls ):
        cls.tokenizer : MBartTokenizer = MBartTokenizer.from_pretrained(
        cls.checkpoint_name ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" )
        cls.pad_token_id = 1
return cls
def lowerCamelCase__ ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] ,25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] ,25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] ,25_00_20 )
def lowerCamelCase__ ( self ):
_lowercase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
self.assertIn(UpperCAmelCase_ ,self.tokenizer.all_special_ids )
        generated_ids = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
_lowercase : List[Any] = self.tokenizer.decode(UpperCAmelCase_ ,skip_special_tokens=UpperCAmelCase_ )
_lowercase : Any = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : str = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] ,UpperCAmelCase_ )
_lowercase : Tuple = 10
_lowercase : Dict = self.tokenizer(UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ).input_ids[0]
self.assertEqual(ids[-2] ,2 )
self.assertEqual(ids[-1] ,UpperCAmelCase_ )
self.assertEqual(len(UpperCAmelCase_ ) ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) ,[25_00_26, 25_00_01] )
def lowerCamelCase__ ( self ):
_lowercase : Tuple = tempfile.mkdtemp()
_lowercase : Optional[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCAmelCase_ )
_lowercase : List[str] = MBartTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : str = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,)
_lowercase : List[str] = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual((2, 14) ,batch.input_ids.shape )
self.assertEqual((2, 14) ,batch.attention_mask.shape )
_lowercase : int = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,UpperCAmelCase_ )
self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, EN_CODE] )
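    # Unlike most seq2seq tokenizers, MBart appends the language code as a
    # suffix, [tokens..., eos, lang_code], rather than prefixing it; that is
    # what the empty prefix_tokens and [eos, EN_CODE] suffix_tokens
    # assertions above verify.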
def lowerCamelCase__ ( self ):
_lowercase : Any = self.tokenizer(self.src_text ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=3 ,return_tensors="""pt""" )
_lowercase : List[str] = self.tokenizer(
text_target=self.tgt_text ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=10 ,return_tensors="""pt""" )
_lowercase : Any = targets["""input_ids"""]
_lowercase : Dict = shift_tokens_right(UpperCAmelCase_ ,self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : List[str] = self.tokenizer._build_translation_inputs(
"""A test""" ,return_tensors="""pt""" ,src_lang="""en_XX""" ,tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) ,{
# A, test, EOS, en_XX
"""input_ids""": [[62, 30_34, 2, 25_00_04]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_00_01,
} ,)
| 600 |
"""simple docstring"""
def fibonacci( n : int ):
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index( n : int ):
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution( n : int = 1000 ):
    return fibonacci_digits_index(n )
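# Project Euler 25 in these terms: solution() returns the index of the first
# Fibonacci term with 1000 digits. Small check against the helpers above:
#   fibonacci_digits_index(2) == 7, since F(7) = 13 is the first 2-digit term.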
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 600 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Optional[Any] = logging.get_logger()
@dataclass
class _a :
"""simple docstring"""
A_ = 42
A_ = field(default_factory=UpperCAmelCase__ )
A_ = field(default_factory=UpperCAmelCase__ )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(_UpperCAmelCase )
def __call__( self , _UpperCAmelCase ) -> List[str]:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_UpperCAmelCase )
[x.remove() for x in self.handles]
return self
@property
def _UpperCAmelCase ( self ) -> List[str]:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda _UpperCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class _a :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 1
A_ = field(default_factory=UpperCAmelCase__ )
A_ = field(default_factory=UpperCAmelCase__ )
A_ = True
def __call__( self , _UpperCAmelCase ) -> Dict:
UpperCamelCase_ = Tracker(self.dest )(_UpperCAmelCase ).parametrized
UpperCamelCase_ = Tracker(self.src )(_UpperCAmelCase ).parametrized
UpperCamelCase_ = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.src_skip , _UpperCAmelCase ) )
UpperCamelCase_ = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.dest_skip , _UpperCAmelCase ) )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ) and self.raise_if_mismatch:
raise Exception(
f"""Numbers of operations are different. Source module has {len(_UpperCAmelCase )} operations while"""
f""" destination module has {len(_UpperCAmelCase )}.""" )
for dest_m, src_m in zip(_UpperCAmelCase , _UpperCAmelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ) -> Optional[Any]:
super().__init__()
UpperCamelCase_ = []
# - get the stem
feature_blocks.append(('conv1', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block' ), f"""Unexpected layer name {k}"""
UpperCamelCase_ = len(_UpperCAmelCase ) + 1
feature_blocks.append((f"""res{block_index}""", v) )
UpperCamelCase_ = nn.ModuleDict(_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[str]:
return get_trunk_forward_outputs(
_UpperCAmelCase , out_feat_keys=_UpperCAmelCase , feature_blocks=self._feature_blocks , )
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> str:
UpperCamelCase_ = x.split('-' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self , _UpperCAmelCase ) -> Callable[[], Tuple[nn.Module, Dict]]:
# default to timm!
if x not in self:
UpperCamelCase_ = self.convert_name_to_timm(_UpperCAmelCase )
UpperCamelCase_ = partial(lambda: (timm.create_model(_UpperCAmelCase , pretrained=_UpperCAmelCase ).eval(), None) )
else:
UpperCamelCase_ = super().__getitem__(_UpperCAmelCase )
return val
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def __getitem__( self , _UpperCAmelCase ) -> Callable[[], nn.Module]:
if "seer" in x and "in1k" not in x:
UpperCamelCase_ = RegNetModel
else:
UpperCamelCase_ = RegNetForImageClassification
return val
def _snake_case (__lowercase , __lowercase , __lowercase):
for from_key, to_key in keys:
UpperCamelCase_ = from_state_dict[from_key].clone()
print(f"""Copied key={from_key} to={to_key}""")
return to_state_dict
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = True , ):
print(f"""Converting {name}...""")
with torch.no_grad():
UpperCamelCase_ , UpperCamelCase_ = from_model_func()
UpperCamelCase_ = our_model_func(__lowercase).eval()
UpperCamelCase_ = ModuleTransfer(src=__lowercase , dest=__lowercase , raise_if_mismatch=__lowercase)
UpperCamelCase_ = torch.randn((1, 3, 224, 224))
module_transfer(__lowercase)
if from_state_dict is not None:
UpperCamelCase_ = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
UpperCamelCase_ = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
UpperCamelCase_ = manually_copy_vissl_head(__lowercase , our_model.state_dict() , __lowercase)
our_model.load_state_dict(__lowercase)
UpperCamelCase_ = our_model(__lowercase , output_hidden_states=__lowercase)
UpperCamelCase_ = (
our_outputs.logits if isinstance(__lowercase , __lowercase) else our_outputs.last_hidden_state
)
UpperCamelCase_ = from_model(__lowercase)
UpperCamelCase_ = from_output[-1] if type(__lowercase) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
UpperCamelCase_ = our_outputs.hidden_states[-1]
assert torch.allclose(__lowercase , __lowercase), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=__lowercase , )
UpperCamelCase_ = 224 if 'seer' not in name else 384
# we can use the convnext one
UpperCamelCase_ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=__lowercase)
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=__lowercase , )
print(f"""Pushed {name}""")
def _snake_case (__lowercase , __lowercase = None , __lowercase = True):
UpperCamelCase_ = 'imagenet-1k-id2label.json'
UpperCamelCase_ = 1000
UpperCamelCase_ = (1, num_labels)
UpperCamelCase_ = 'huggingface/label-files'
UpperCamelCase_ = num_labels
UpperCamelCase_ = json.load(open(cached_download(hf_hub_url(__lowercase , __lowercase , repo_type='dataset')) , 'r'))
UpperCamelCase_ = {int(__lowercase): v for k, v in idalabel.items()}
UpperCamelCase_ = idalabel
UpperCamelCase_ = {v: k for k, v in idalabel.items()}
UpperCamelCase_ = partial(__lowercase , num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase)
UpperCamelCase_ = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='x'),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='x'),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='x'),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='x'),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='x'),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='x'),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='x'),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='x'),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='x'),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='x'),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='x'),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='x'),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010),
}
UpperCamelCase_ = NameToOurModelFuncMap()
UpperCamelCase_ = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(__lowercase , __lowercase) -> Tuple[nn.Module, Dict]:
UpperCamelCase_ = torch.hub.load_state_dict_from_url(__lowercase , model_dir=str(__lowercase) , map_location='cpu')
UpperCamelCase_ = model_func()
# check if we have a head, if yes add it
UpperCamelCase_ = files['classy_state_dict']['base_model']['model']
UpperCamelCase_ = model_state_dict['trunk']
model.load_state_dict(__lowercase)
return model.eval(), model_state_dict["heads"]
# pretrained
UpperCamelCase_ = partial(
__lowercase , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf()) , )
UpperCamelCase_ = partial(
__lowercase , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf()) , )
UpperCamelCase_ = partial(
__lowercase , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf()) , )
UpperCamelCase_ = partial(
__lowercase , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52))) , )
# IN1K finetuned
UpperCamelCase_ = partial(
__lowercase , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf()) , )
UpperCamelCase_ = partial(
__lowercase , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf()) , )
UpperCamelCase_ = partial(
__lowercase , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf()) , )
UpperCamelCase_ = partial(
__lowercase , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52))) , )
if model_name:
convert_weight_and_push(
__lowercase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , __lowercase , __lowercase , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
__lowercase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , __lowercase , __lowercase , __lowercase , )
return config, expected_shape
if __name__ == "__main__":
snake_case__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
snake_case__ : Dict = parser.parse_args()
snake_case__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 23 |
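# Example invocation of the conversion script above (file name and paths are
# illustrative; `--model_name` must be one of the keys in the config map):
#
#   python convert_regnet_to_pytorch.py \
#       --model_name regnet-y-040 \
#       --pytorch_dump_folder_path ./converted-regnet
#
# Note that `--push_to_hub` is declared with `type=bool`, so argparse treats
# any non-empty string as True; omit the flag to keep the default.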
"""simple docstring"""
from collections.abc import Callable
class a__ :
def __init__( self :Tuple , _lowerCamelCase :Callable | None = None ):
'''simple docstring'''
UpperCamelCase_ : list =[]
# Stores indexes of each item for supporting updates and deletion.
UpperCamelCase_ : dict ={}
# Stores current size of heap.
UpperCamelCase_ : Any =0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
        UpperCamelCase_ : List[str] =key or (lambda _lowerCamelCase : _lowerCamelCase)
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :int ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : List[str] =int(2 * i + 1 )
return left if 0 < left < self.size else None
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =int(2 * i + 2 )
return right if 0 < right < self.size else None
def lowerCamelCase_ ( self :Dict , _lowerCamelCase :int , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ , UpperCamelCase_ : Optional[int] =(
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
UpperCamelCase_ , UpperCamelCase_ : Union[str, Any] =self.arr[j], self.arr[i]
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :int , _lowerCamelCase :int ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def lowerCamelCase_ ( self :Any , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : int =self._left(_lowerCamelCase )
UpperCamelCase_ : List[Any] =self._right(_lowerCamelCase )
UpperCamelCase_ : Optional[Any] =i
if left is not None and not self._cmp(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_ : Optional[int] =left
if right is not None and not self._cmp(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_ : List[Any] =right
return valid_parent
def lowerCamelCase_ ( self :Any , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Dict =self._parent(_lowerCamelCase )
while parent is not None and not self._cmp(_lowerCamelCase , _lowerCamelCase ):
self._swap(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_ , UpperCamelCase_ : Dict =parent, self._parent(_lowerCamelCase )
def lowerCamelCase_ ( self :List[str] , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =self._get_valid_parent(_lowerCamelCase )
while valid_parent != index:
self._swap(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_ , UpperCamelCase_ : int =valid_parent, self._get_valid_parent(_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :int , _lowerCamelCase :int ):
'''simple docstring'''
if item not in self.pos_map:
return
UpperCamelCase_ : List[Any] =self.pos_map[item]
UpperCamelCase_ : int =[item, self.key(_lowerCamelCase )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_lowerCamelCase )
self._heapify_down(_lowerCamelCase )
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :int ):
'''simple docstring'''
if item not in self.pos_map:
return
UpperCamelCase_ : Any =self.pos_map[item]
del self.pos_map[item]
UpperCamelCase_ : Dict =self.arr[self.size - 1]
UpperCamelCase_ : Optional[int] =index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_lowerCamelCase )
self._heapify_down(_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[int] , _lowerCamelCase :int , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] =len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_lowerCamelCase )] )
else:
UpperCamelCase_ : str =[item, self.key(_lowerCamelCase )]
UpperCamelCase_ : Optional[int] =self.size
self.size += 1
self._heapify_up(self.size - 1 )
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
return self.arr[0] if self.size else None
def lowerCamelCase_ ( self :Tuple ):
'''simple docstring'''
UpperCamelCase_ : int =self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def A_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357 | 0 |
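# The class above is an indexed heap: `pos_map` locates an item in O(1) so it
# can be updated or deleted before re-heapifying (its method names are mangled
# in this dump; upstream they are `insert_item`, `update_item`, `delete_item`,
# `get_top` and `extract_top`). A minimal, self-contained sketch of the same
# keyed-ordering idea using the standard library instead:
import heapq

scores = [(len(word), word) for word in ("pear", "fig", "banana")]
heapq.heapify(scores)  # min-heap keyed on the first tuple element
assert heapq.heappop(scores) == (3, "fig")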
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase_ = logging.get_logger(__name__)
def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ = None ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = tesseract_config if tesseract_config is not None else """"""
# apply OCR
snake_case_ = to_pil_image(lowercase_ )
snake_case_ , snake_case_ = pil_image.size
snake_case_ = pytesseract.image_to_data(lowercase_ , lang=lowercase_ , output_type="""dict""" , config=lowercase_ )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
snake_case_ = [idx for idx, word in enumerate(lowercase_ ) if not word.strip()]
snake_case_ = [word for idx, word in enumerate(lowercase_ ) if idx not in irrelevant_indices]
snake_case_ = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
snake_case_ = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
snake_case_ = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
snake_case_ = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
snake_case_ = []
for x, y, w, h in zip(lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
snake_case_ = [x, y, x + w, y + h]
actual_boxes.append(lowercase_ )
# finally, normalize the bounding boxes
snake_case_ = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowercase_ , lowercase_ , lowercase_ ) )
assert len(lowercase_ ) == len(lowercase_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class __lowerCamelCase ( __snake_case ):
lowerCamelCase_ : str = ['pixel_values']
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BILINEAR , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = "" , **lowerCamelCase , ) -> None:
super().__init__(**lowerCamelCase )
snake_case_ = size if size is not None else {"""height""": 224, """width""": 224}
snake_case_ = get_size_dict(lowerCamelCase )
snake_case_ = do_resize
snake_case_ = size
snake_case_ = resample
snake_case_ = apply_ocr
snake_case_ = ocr_lang
snake_case_ = tesseract_config
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BILINEAR , lowerCamelCase = None , **lowerCamelCase , ) -> np.ndarray:
snake_case_ = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
snake_case_ = (size["""height"""], size["""width"""])
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> PIL.Image.Image:
snake_case_ = do_resize if do_resize is not None else self.do_resize
snake_case_ = size if size is not None else self.size
snake_case_ = get_size_dict(lowerCamelCase )
snake_case_ = resample if resample is not None else self.resample
snake_case_ = apply_ocr if apply_ocr is not None else self.apply_ocr
snake_case_ = ocr_lang if ocr_lang is not None else self.ocr_lang
snake_case_ = tesseract_config if tesseract_config is not None else self.tesseract_config
snake_case_ = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCamelCase ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
snake_case_ = []
snake_case_ = []
for image in images:
snake_case_ , snake_case_ = apply_tesseract(lowerCamelCase , lowerCamelCase , lowerCamelCase )
words_batch.append(lowerCamelCase )
boxes_batch.append(lowerCamelCase )
if do_resize:
snake_case_ = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
snake_case_ = [flip_channel_order(lowerCamelCase ) for image in images]
snake_case_ = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
snake_case_ = BatchFeature(data={"""pixel_values""": images} , tensor_type=lowerCamelCase )
if apply_ocr:
snake_case_ = words_batch
snake_case_ = boxes_batch
        return data
| 161 |
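# Worked example for `normalize_box` above: boxes are rescaled to the 0-1000
# coordinate grid expected by LayoutLM-style models. For a 200x100 image, the
# box (left=20, top=10, right=100, bottom=50) maps to
#   [int(1000 * 20 / 200), int(1000 * 10 / 100), int(1000 * 100 / 200), int(1000 * 50 / 100)]
#   == [100, 100, 500, 500]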
from math import ceil
def UpperCamelCase( lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
snake_case_ = list(range(0 , lowercase_ ) )
snake_case_ = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
snake_case_ = []
for i in device_map_blocks:
if device_map_blocks.count(lowercase_ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(lowercase_ )
# Missing blocks
snake_case_ = [i for i in blocks if i not in device_map_blocks]
snake_case_ = [i for i in device_map_blocks if i not in blocks]
if len(lowercase_ ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(lowercase_ ) )
if len(lowercase_ ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(lowercase_ ) )
if len(lowercase_ ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(lowercase_ ) )
def UpperCamelCase( lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = list(range(lowercase_ ) )
snake_case_ = int(ceil(n_layers / len(lowercase_ ) ) )
snake_case_ = [layers[i : i + n_blocks] for i in range(0 , lowercase_ , lowercase_ )]
    return dict(zip(lowercase_ , lowercase_ ) )
| 161 | 1 |
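# Quick check of the layer-splitting helper above (upstream name:
# `get_device_map`): 12 layers over 3 devices gives ceil(12 / 3) = 4
# consecutive layers per device, i.e.
#   {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}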
from math import ceil
def __a ( A__ : str , A__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = list(range(0 , A__ ) )
SCREAMING_SNAKE_CASE = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
SCREAMING_SNAKE_CASE = []
for i in device_map_blocks:
if device_map_blocks.count(A__ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(A__ )
# Missing blocks
SCREAMING_SNAKE_CASE = [i for i in blocks if i not in device_map_blocks]
SCREAMING_SNAKE_CASE = [i for i in device_map_blocks if i not in blocks]
if len(A__ ) != 0:
raise ValueError(
"Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
" These attention blocks were specified more than once: " + str(A__ ) )
if len(A__ ) != 0:
raise ValueError(
"There are attention blocks for this model that are not specified in the device_map. Add these attention "
"blocks to a device on the device_map: " + str(A__ ) )
if len(A__ ) != 0:
raise ValueError(
"The device_map contains more attention blocks than this model has. Remove these from the device_map:"
+ str(A__ ) )
def __a ( A__ : Dict , A__ : Optional[int] ):
SCREAMING_SNAKE_CASE = list(range(A__ ) )
SCREAMING_SNAKE_CASE = int(ceil(n_layers / len(A__ ) ) )
SCREAMING_SNAKE_CASE = [layers[i : i + n_blocks] for i in range(0 , A__ , A__ )]
    return dict(zip(A__ , A__ ) )
| 16 |
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
__lowercase = word.split()
def justify(lowercase , lowercase , lowercase ) -> str:
__lowercase = max_width - width
__lowercase = len(lowercase )
if len(lowercase ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
__lowercase = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
__lowercase = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
__lowercase = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(lowercase ):
num_spaces_between_words_list[i] += 1
__lowercase = []
for i in range(lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(lowercase )
__lowercase = []
__lowercase = []
__lowercase = 0
for word in words:
if width + len(lowercase ) + len(lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(lowercase )
width += len(lowercase )
else:
# justify the line and add it to result
answer.append(justify(lowercase , lowercase , lowercase ) )
# reset new line and new width
__lowercase , __lowercase = [word], len(lowercase )
__lowercase = max_width - width - len(lowercase )
answer.append(''' '''.join(lowercase ) + (remaining_spaces + 1) * ''' ''' )
return answer
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 534 | 0 |
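# The routine above (upstream name: `text_justification`) distributes spaces
# round-robin with a left bias and pads the last line. For example:
#   text_justification("This is an example of text justification.", 16)
#   == ['This    is    an', 'example  of text', 'justification.  ']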
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=30 , __snake_case=2 , __snake_case=3 , __snake_case=True , __snake_case=True , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=10 , __snake_case=0.0_2 , __snake_case=None , __snake_case=2 , ):
_UpperCamelCase : Dict = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : int = image_size
_UpperCamelCase : Optional[Any] = patch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : List[str] = use_labels
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Tuple = hidden_act
_UpperCamelCase : Optional[Any] = hidden_dropout_prob
_UpperCamelCase : Optional[int] = attention_probs_dropout_prob
_UpperCamelCase : Union[str, Any] = type_sequence_label_size
_UpperCamelCase : Optional[int] = initializer_range
_UpperCamelCase : Dict = scope
_UpperCamelCase : List[str] = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Dict = (image_size // patch_size) ** 2
_UpperCamelCase : Dict = num_patches + 1
def A__ ( self):
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase : int = None
if self.use_labels:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def A__ ( self):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = ViTModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Dict = ViTForMaskedImageModeling(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
_UpperCamelCase : Dict = 1
_UpperCamelCase : Optional[Any] = ViTForMaskedImageModeling(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCamelCase : Union[str, Any] = model(__snake_case)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = self.type_sequence_label_size
_UpperCamelCase : Optional[int] = ViTForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_UpperCamelCase : Dict = 1
_UpperCamelCase : List[Any] = ViTForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCamelCase : Union[str, Any] = model(__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def A__ ( self):
_UpperCamelCase : Any = self.prepare_config_and_inputs()
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
a__ = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
a__ = True
a__ = False
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Any = ViTModelTester(self)
_UpperCamelCase : Any = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__snake_case)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCamelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear))
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Tuple = model_class(__snake_case)
_UpperCamelCase : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
@slow
def A__ ( self):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Any = ViTModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
def lowerCamelCase_ ( ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self):
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def A__ ( self):
_UpperCamelCase : List[str] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(__snake_case)
_UpperCamelCase : Optional[int] = self.default_image_processor
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : Optional[Any] = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : List[str] = model(**__snake_case)
# verify the logits
_UpperCamelCase : Optional[int] = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __snake_case)
_UpperCamelCase : int = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4))
@slow
def A__ ( self):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
_UpperCamelCase : Tuple = ViTModel.from_pretrained('facebook/dino-vits8').to(__snake_case)
_UpperCamelCase : Optional[int] = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_80)
_UpperCamelCase : Tuple = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=__snake_case , return_tensors='pt')
_UpperCamelCase : str = inputs.pixel_values.to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : Optional[Any] = model(__snake_case , interpolate_pos_encoding=__snake_case)
# verify the logits
_UpperCamelCase : Dict = torch.Size((1, 36_01, 3_84))
self.assertEqual(outputs.last_hidden_state.shape , __snake_case)
_UpperCamelCase : Any = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __snake_case , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def A__ ( self):
_UpperCamelCase : Any = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto')
_UpperCamelCase : Union[str, Any] = self.default_image_processor
_UpperCamelCase : Union[str, Any] = prepare_img()
_UpperCamelCase : Tuple = image_processor(images=__snake_case , return_tensors='pt')
_UpperCamelCase : List[Any] = inputs.pixel_values.to(__snake_case)
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase : str = model(__snake_case)
| 705 |
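# The integration tests above boil down to this inference pattern (a sketch;
# "cat.png" is a placeholder image path, the checkpoint is the one the tests
# use):
from PIL import Image
import torch
from transformers import ViTForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[int(logits.argmax(-1))])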
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_UpperCamelCase : List[Any] = k.replace(UpperCAmelCase_ , UpperCAmelCase_ )
if k.startswith('encoder' ):
_UpperCamelCase : Optional[Any] = k.replace('.attn' , '.self_attn' )
_UpperCamelCase : Optional[int] = k.replace('norm1' , 'self_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
_UpperCamelCase : Any = k.replace('norm1' , 'self_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm2' , 'encoder_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm3' , 'final_layer_norm' )
return k
def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
_UpperCamelCase : Optional[int] = sd.pop(UpperCAmelCase_ )
_UpperCamelCase : str = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
_UpperCamelCase : Tuple = v
lowerCAmelCase__ = ["""START"""]
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : int = model['model']
_UpperCamelCase : List[Any] = BlenderbotConfig.from_json_file(UpperCAmelCase_ )
_UpperCamelCase : Any = BlenderbotForConditionalGeneration(UpperCAmelCase_ )
_UpperCamelCase : int = m.model.state_dict().keys()
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : int = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_UpperCamelCase : Optional[int] = rename_state_dict_key(UpperCAmelCase_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_UpperCamelCase : int = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(UpperCAmelCase_ )
m.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
m.half()
m.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
lowerCAmelCase__ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 648 | 0 |
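# Tracing `rename_state_dict_key` above on a typical ParlAI key (illustrative):
#   'encoder.layers.0.attention.q_lin.weight'
#     -> 'encoder.layers.0.attn.q_proj.weight'       (PATTERNS substitutions)
#     -> 'encoder.layers.0.self_attn.q_proj.weight'  (encoder-specific fixups)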
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase_ ( UpperCamelCase : ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
raise NotImplementedError()
| 411 |
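# A minimal concrete command following the ABC above (upstream it is
# `BaseTransformersCLICommand`; the class and argument names below are
# illustrative, not part of the library):
from argparse import ArgumentParser, Namespace


class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # `parser` is the subparsers action handed in by the CLI entry point
        echo = parser.add_parser("echo", help="print back the given text")
        echo.add_argument("text")
        echo.set_defaults(func=lambda args: EchoCommand(args))

    def __init__(self, args: Namespace):
        self._text = args.text

    def run(self):
        print(self._text)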
def lowerCamelCase_ ( lowerCAmelCase: str )-> str:
_snake_case : str = 0
# if input_string is "aba" than new_input_string become "a|b|a"
_snake_case : List[Any] = ''
_snake_case : Dict = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(lowerCAmelCase ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previously found furthest-ending
    # palindromic substring
_snake_case , _snake_case : Union[str, Any] = 0, 0
# length[i] shows the length of palindromic substring with center i
_snake_case : Optional[Any] = [1 for i in range(len(lowerCAmelCase ) )]
# for each character in new_string find corresponding palindromic string
_snake_case : Any = 0
for j in range(len(lowerCAmelCase ) ):
_snake_case : Tuple = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(lowerCAmelCase )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_snake_case : str = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update r to the last index of this palindrome
if j + k - 1 > r:
_snake_case : List[str] = j - k + 1 # noqa: E741
_snake_case : List[Any] = j + k - 1
# update max_length and start position
if max_length < length[j]:
_snake_case : List[Any] = length[j]
_snake_case : Optional[Any] = j
# create that string
_snake_case : Any = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 411 | 1 |
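# The function above is Manacher's algorithm (upstream name:
# `palindromic_string`); it finds the longest palindromic substring in O(n):
#   palindromic_string("abbbaba") == "abbba"
#   palindromic_string("ababa")   == "ababa"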
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
__A = mock.Mock()
__A = 5_00
__A = {}
__A = HTTPError
__A = {}
# Download this model to make sure it's in the cache.
__A = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''', return_value=_lowerCamelCase ) as mock_head:
__A = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            # This checks that we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
__A = mock.Mock()
__A = 5_00
__A = {}
__A = HTTPError
__A = {}
# Download this model to make sure it's in the cache.
__A = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''', return_value=_lowerCamelCase ) as mock_head:
__A = GPTaTokenizerFast.from_pretrained('''gpt2''' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
try:
__A = tempfile.mktemp()
with open(_lowerCamelCase, '''wb''' ) as f:
http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', _lowerCamelCase )
__A = AlbertTokenizer.from_pretrained(_lowerCamelCase )
finally:
os.remove(_lowerCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json''' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''', '''wb''' ) as f:
http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''', _lowerCamelCase )
__A = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size, 10_00 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''' )
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
__A = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' )
@is_staging_test
class snake_case ( unittest.TestCase ):
'''simple docstring'''
A_ : Union[str, Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ):
'''simple docstring'''
__A = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='''test-tokenizer''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-tokenizer-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''test-dynamic-tokenizer''' )
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A = os.path.join(_lowerCamelCase, '''vocab.txt''' )
with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__A = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub('''test-tokenizer''', use_auth_token=self._token )
__A = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
# Reset repo
delete_repo(token=self._token, repo_id='''test-tokenizer''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCamelCase, repo_id='''test-tokenizer''', push_to_hub=_lowerCamelCase, use_auth_token=self._token )
__A = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A = os.path.join(_lowerCamelCase, '''vocab.txt''' )
with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__A = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub('''valid_org/test-tokenizer-org''', use_auth_token=self._token )
__A = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
# Reset repo
delete_repo(token=self._token, repo_id='''valid_org/test-tokenizer-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_lowerCamelCase, repo_id='''valid_org/test-tokenizer-org''', push_to_hub=_lowerCamelCase, use_auth_token=self._token )
__A = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
@require_tokenizers
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__A = os.path.join(_lowerCamelCase, '''vocab.txt''' )
with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__A = CustomTokenizer(_lowerCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub('''test-dynamic-tokenizer''', use_auth_token=self._token )
__A = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizer''' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__A = os.path.join(_lowerCamelCase, '''vocab.txt''' )
with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__A = BertTokenizerFast.from_pretrained(_lowerCamelCase )
bert_tokenizer.save_pretrained(_lowerCamelCase )
__A = CustomTokenizerFast.from_pretrained(_lowerCamelCase )
tokenizer.push_to_hub('''test-dynamic-tokenizer''', use_auth_token=self._token )
__A = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizerFast''' )
__A = AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer', use_fast=_lowerCamelCase, trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizer''' )
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
__A = Trie()
trie.add('''Hello 友達''' )
self.assertEqual(trie.data, {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
trie.add('''Hello''' )
trie.data
self.assertEqual(trie.data, {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
__A = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ), ['''[CLS] This is a extra_id_100'''] )
trie.add('''[CLS]''' )
trie.add('''extra_id_1''' )
trie.add('''extra_id_100''' )
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ), ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] )
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
__A = Trie()
trie.add('''A''' )
self.assertEqual(trie.split('''ABC''' ), ['''A''', '''BC'''] )
self.assertEqual(trie.split('''BCA''' ), ['''BC''', '''A'''] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = Trie()
trie.add('''TOKEN]''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ), ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
__A = Trie()
trie.add('''A''' )
trie.add('''P''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ), ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = Trie()
trie.add('''AB''' )
trie.add('''B''' )
trie.add('''C''' )
self.assertEqual(trie.split('''ABC''' ), ['''AB''', '''C'''] )
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
__A = Trie()
trie.add('''ABC''' )
trie.add('''B''' )
trie.add('''CD''' )
self.assertEqual(trie.split('''ABCD''' ), ['''ABC''', '''D'''] )
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
__A = Trie()
__A = trie.cut_text('''ABC''', [0, 0, 2, 1, 2, 3] )
self.assertEqual(_lowerCamelCase, ['''AB''', '''C'''] )
| 719 |
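# The Trie exercised above is part of the public tokenization utilities; a
# standalone sketch of the splitting behaviour the tests assert:
from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_100")
assert trie.split("[CLS] This is a extra_id_100") == ["[CLS]", " This is a ", "extra_id_100"]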
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class snake_case :
'''simple docstring'''
A_ : Tuple = PegasusConfig
A_ : Optional[Any] = {}
A_ : Any = "gelu"
def __init__( self : Optional[int], _lowerCamelCase : Union[str, Any], _lowerCamelCase : str=13, _lowerCamelCase : Optional[Any]=7, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : int=False, _lowerCamelCase : str=99, _lowerCamelCase : Union[str, Any]=32, _lowerCamelCase : str=2, _lowerCamelCase : List[Any]=4, _lowerCamelCase : Optional[Any]=37, _lowerCamelCase : Union[str, Any]=0.1, _lowerCamelCase : Optional[int]=0.1, _lowerCamelCase : Optional[Any]=40, _lowerCamelCase : List[str]=2, _lowerCamelCase : Dict=1, _lowerCamelCase : Any=0, ):
'''simple docstring'''
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = eos_token_id
__A = pad_token_id
__A = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
__A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
__A = tf.concat([input_ids, eos_tensor], axis=1 )
__A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
__A = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
__A = prepare_pegasus_inputs_dict(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : Union[str, Any], _lowerCamelCase : Tuple ):
'''simple docstring'''
__A = TFPegasusModel(config=_lowerCamelCase ).get_decoder()
__A = inputs_dict['''input_ids''']
__A = input_ids[:1, :]
__A = inputs_dict['''attention_mask'''][:1, :]
__A = inputs_dict['''head_mask''']
__A = 1
# first forward pass
__A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, head_mask=_lowerCamelCase, use_cache=_lowerCamelCase )
__A , __A = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__A = ids_tensor((self.batch_size, 3), config.vocab_size )
__A = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
        # append to next input_ids and attention_mask
__A = tf.concat([input_ids, next_tokens], axis=-1 )
__A = tf.concat([attention_mask, next_attn_mask], axis=-1 )
__A = model(_lowerCamelCase, attention_mask=_lowerCamelCase )[0]
__A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, past_key_values=_lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
__A = int(ids_tensor((1,), output_from_past.shape[-1] ) )
__A = output_from_no_past[:, -3:, random_slice_idx]
__A = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowerCamelCase, _lowerCamelCase, rtol=1e-3 )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ):
"""simple docstring"""
if attention_mask is None:
__A = tf.cast(tf.math.not_equal(__UpperCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__A = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__A = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class snake_case ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ : str = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
A_ : Optional[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
A_ : Optional[int] = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
A_ : Tuple = True
A_ : Union[str, Any] = False
A_ : str = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
__A = TFPegasusModelTester(self )
__A = ConfigTester(self, config_class=_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowerCamelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class snake_case ( unittest.TestCase ):
'''simple docstring'''
A_ : List[str] = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
    expected_text = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
@cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words
    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words
@slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
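# Editor's usage sketch (not part of the original tests): the checkpoint named in
# `model_name` above can also be exercised through the high-level pipeline API.
# Everything below is illustrative only.
#
#   from transformers import pipeline
#   summarizer = pipeline("summarization", model="google/pegasus-xsum", framework="tf")
#   print(summarizer(TFPegasusIntegrationTests.src_text[0], num_beams=2)[0]["summary_text"])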
| 215 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_sew'] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
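# Editor's note (illustrative, not part of the original file): with the lazy-module
# pattern above, importing the package stays cheap; the torch-backed classes in
# `modeling_sew` are only imported on first attribute access, e.g.:
#
#   from transformers.models import sew
#   ctc_cls = sew.SEWForCTC  # the real import of modeling_sew happens here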
| 49 |
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
"""simple docstring"""
return requests.get(URL_BASE + '''weather''' ,params=locals() ).json()
def UpperCAmelCase__ (UpperCamelCase_ = "Kolkata, India" ,UpperCamelCase_ = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + '''forecast''' ,params=locals() ).json()
def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
"""simple docstring"""
return requests.get(URL_BASE + '''onecall''' ,params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
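# Editor's usage sketch (assumes a valid APPID has been filled in above; field
# names follow the public OpenWeatherMap response schema):
#
#   data = current_weather("London")
#   print(data["weather"][0]["description"], data["main"]["temp"])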
| 550 | 0 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
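# Editor's usage sketch (not part of the original file; the checkpoint name is an
# assumption for illustration):
#
#   from transformers import OwlViTProcessor
#   from PIL import Image
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=Image.open("example.png"), return_tensors="pt")
#   # inputs now carries input_ids, attention_mask and pixel_values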
| 717 |
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F'''{solution() = }''')
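# Editor's worked example (not in the original): for integer = 3 the candidate is
# (3**2 - 1) / 4 = 2, an integer, so it counts toward total_partitions; then
# check_partition_perfect(2) tests whether log2(sqrt(4*2 + 1)/2 + 1/2) = log2(2) = 1
# is integral, which it is, so 2 also counts as a perfect partition.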
| 364 | 0 |
'''simple docstring'''
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
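# Editor's usage sketch (non-interactive; uses the insert/erase/inorder helpers
# defined above):
#
#   root = None
#   for v in (5, 3, 8, 3):
#       root = insert(root, v)
#   root = erase(root, 3)  # removes every node holding 3
#   inorder(root)          # prints 5,8,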
| 24 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def get_config(self):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict
    def create_and_check_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small(self):
        pass
@slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids) | 149 | 0 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('''\n'''.join(upper_files) + '''\n''')
space_files = [file for file in filepaths if " " in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('''\n'''.join(space_files) + '''\n''')
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('''\n'''.join(hyphen_files) + '''\n''')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('''\n'''.join(nodir_files) + '''\n''')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files) | 706 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
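# Editor's usage sketch (illustrative invocation; the script file name is assumed):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast-tokenizers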
| 657 | 0 |
"""simple docstring"""
from __future__ import annotations
__author__ = 'Muhammad Umer Farooq'
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = 'Muhammad Umer Farooq'
__email__ = '[email protected]'
__status__ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Get main domain name (example.com)."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Get sub domain name (sub.example.com)."""
    return parse.urlparse(url).netloc
def emails_from_url(url: str = "https://github.com") -> list[str]:
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url('https://github.com')
print(F"{len(emails)} emails found:")
print('\n'.join(sorted(emails))) | 516 | """simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports) | 516 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
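# Editor's usage sketch (label names are assumptions for illustration):
#
#   from datasets import Audio, ClassLabel, Features
#   task = AudioClassification()
#   feats = Features({"audio": Audio(), "labels": ClassLabel(names=["no", "yes"])})
#   task = task.align_with_features(feats)  # copies the concrete ClassLabel into label_schema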
| 109 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '__test_patch_submodule_mock__'
    with patch_submodule(_test_patching, 'os.path.join', mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open
    mock = '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
    with patch_submodule(_test_patching, 'open', mock):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = '__test_patch_submodule_missing_mock__'
    with patch_submodule(_test_patching, 'pandas.read_csv', mock):
pass
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = '__test_patch_submodule_missing_builtin_mock__'
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, 'len', None) is None
    with patch_submodule(_test_patching, 'len', mock):
        assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = '__test_patch_submodule_start_and_stop_mock__'
    patch = patch_submodule(_test_patching, 'open', mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = '__test_patch_submodule_successive_join__'
    mock_dirname = '__test_patch_submodule_successive_dirname__'
    mock_rename = '__test_patch_submodule_successive_rename__'
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, 'os.path.join', mock_join):
        with patch_submodule(_test_patching, 'os.rename', mock_rename):
            with patch_submodule(_test_patching, 'os.path.dirname', mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, 'os.rename', mock_rename):
        with patch_submodule(_test_patching, 'os.path.join', mock_join):
            with patch_submodule(_test_patching, 'os.path.dirname', mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = '__test_patch_submodule_doesnt_exist_mock__'
    with patch_submodule(_test_patching, '__module_that_doesn_exist__.__attribute_that_doesn_exist__', mock):
        pass
    with patch_submodule(_test_patching, 'os.__attribute_that_doesn_exist__', mock):
pass
| 109 | 1 |
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
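# Editor's examples (not in the original docstring):
#
#   decimal_to_binary(0)    # -> "0b0"
#   decimal_to_binary(40)   # -> "0b101000"
#   decimal_to_binary(-40)  # -> "-0b101000"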
| 236 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
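# Editor's usage sketch (mirrors the expectations asserted above):
#
#   tokenizer = LxmertTokenizer(vocab_file)
#   tokenizer.tokenize("UNwant\u00E9d,running")
#   # -> ["un", "##want", "##ed", ",", "runn", "##ing"]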
| 236 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
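# Editor's usage sketch (illustrative paths; the script file name is assumed):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel/pytorch_model.bin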
| 521 |
| 521 | 1 |
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
def snake_case__ ( *UpperCAmelCase : Dict , **UpperCAmelCase : List[str] ):
requires_backends(UpperCAmelCase , ["torch"] )
def snake_case__ ( *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ):
requires_backends(UpperCAmelCase , ["torch"] )
def snake_case__ ( *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ):
requires_backends(UpperCAmelCase , ["torch"] )
def snake_case__ ( *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ):
requires_backends(UpperCAmelCase , ["torch"] )
def snake_case__ ( *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ):
requires_backends(UpperCAmelCase , ["torch"] )
def snake_case__ ( *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ):
requires_backends(UpperCAmelCase , ["torch"] )
def snake_case__ ( *UpperCAmelCase : str , **UpperCAmelCase : str ):
requires_backends(UpperCAmelCase , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class _UpperCAmelCase ( metaclass=_A ):
"""simple docstring"""
A = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def snake_case_ ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
| 145 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( _A ):
"""simple docstring"""
A = '''EncodecFeatureExtractor'''
A = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
'''simple docstring'''
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = self.feature_extractor
lowerCAmelCase__ :Tuple = False
def snake_case_ ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True ):
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=_lowerCAmelCase , language=_lowerCAmelCase , no_timestamps=_lowerCAmelCase )
def __call__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowerCAmelCase , **_lowerCAmelCase )
lowerCAmelCase__ :Optional[Any] = kwargs.pop("audio" , _lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = kwargs.pop("sampling_rate" , _lowerCAmelCase )
lowerCAmelCase__ :Dict = kwargs.pop("text" , _lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
lowerCAmelCase__ :Optional[int] = args[0]
lowerCAmelCase__ :Tuple = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if text is not None:
lowerCAmelCase__ :Any = self.tokenizer(_lowerCAmelCase , **_lowerCAmelCase )
if audio is not None:
lowerCAmelCase__ :Tuple = self.feature_extractor(_lowerCAmelCase , *_lowerCAmelCase , sampling_rate=_lowerCAmelCase , **_lowerCAmelCase )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCAmelCase__ :List[str] = audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
lowerCAmelCase__ :int = audio_inputs["padding_mask"]
return inputs
def snake_case_ ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = kwargs.pop("audio" , _lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = kwargs.pop("padding_mask" , _lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
lowerCAmelCase__ :int = args[0]
lowerCAmelCase__ :List[str] = args[1:]
if audio_values is not None:
return self._decode_audio(_lowerCAmelCase , padding_mask=_lowerCAmelCase )
else:
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def snake_case_ ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = to_numpy(_lowerCAmelCase )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ :Optional[Any] = audio_values.shape
if padding_mask is None:
return list(_lowerCAmelCase )
lowerCAmelCase__ :List[str] = to_numpy(_lowerCAmelCase )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCAmelCase__ :str = seq_len - padding_mask.shape[-1]
lowerCAmelCase__ :Union[str, Any] = 1 - self.feature_extractor.padding_value
lowerCAmelCase__ :Optional[Any] = np.pad(_lowerCAmelCase , ((0, 0), (0, difference)) , "constant" , constant_values=_lowerCAmelCase )
lowerCAmelCase__ :Union[str, Any] = audio_values.tolist()
for i in range(_lowerCAmelCase ):
lowerCAmelCase__ :str = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCAmelCase__ :List[Any] = sliced_audio.reshape(_lowerCAmelCase , -1 )
return audio_values
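# Illustrative usage sketch (ours; names and shapes assumed, not taken from this file): a
# processor combining an EncodecFeatureExtractor with a T5 tokenizer is typically driven like
#   inputs = processor(text=["80s pop track"], audio=waveform, sampling_rate=32_000, return_tensors="pt")
#   audio_arrays = processor.batch_decode(generated_values, padding_mask=inputs["padding_mask"])
# where decoding trims each generated waveform back to its unpadded length via the padding mask.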
| 145 | 1 |
def solution ( n = 1000 ):
    '''simple docstring'''
    # For 3 <= a <= n, the maximum of ((a - 1)**k + (a + 1)**k) % a**2 over k is 2 * a * ((a - 1) // 2).
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
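# Illustrative check (our own arithmetic): solution(7) = 2*3*1 + 2*4*1 + 2*5*2 + 2*6*2 + 2*7*3
# = 6 + 8 + 20 + 24 + 42 = 100.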
if __name__ == "__main__":
    print(solution())
| 300 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __a:
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,) -> Tuple:
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : Optional[Any] = 13
UpperCAmelCase_ : Optional[Any] = 7
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Dict = 2
UpperCAmelCase_ : Tuple = 99
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Optional[int] = 32
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Tuple = 4
UpperCAmelCase_ : List[Any] = 0.1
UpperCAmelCase_ : int = 0.1
UpperCAmelCase_ : List[str] = 512
UpperCAmelCase_ : Any = 16
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Any = 0.02
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : List[Any] = 4
UpperCAmelCase_ : Dict = '''last'''
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Union[str, Any] = 0
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.floataa )
UpperCAmelCase_ : Optional[Any] = None
if self.use_input_lengths:
UpperCAmelCase_ : Optional[int] = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
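            # i.e. each example's length is seq_length - 2 or seq_length - 1, which exercises the
            # variable-length input path.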
UpperCAmelCase_ : List[str] = None
if self.use_token_type_ids:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Any = None
if self.use_labels:
UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] ,2 ,dtype=tf.floataa )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : int = FlaubertConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,bos_token_id=self.bos_token_id ,)
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Any:
UpperCAmelCase_ : Tuple = TFFlaubertModel(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = [input_ids, input_mask]
UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> str:
UpperCAmelCase_ : int = TFFlaubertWithLMHeadModel(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCAmelCase_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Tuple:
UpperCAmelCase_ : List[Any] = TFFlaubertForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCAmelCase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> int:
UpperCAmelCase_ : List[Any] = TFFlaubertForSequenceClassification(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Optional[Any]:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : List[str] = TFFlaubertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> str:
UpperCAmelCase_ : List[Any] = self.num_choices
UpperCAmelCase_ : Any = TFFlaubertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase_ : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase_ : str = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase_ : Dict = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
),
) : Any = config_and_inputs
UpperCAmelCase_ : Tuple = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class __a( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowerCAmelCase = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def a__ ( self ) -> Any:
UpperCAmelCase_ : Optional[int] = TFFlaubertModelTester(self )
UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,emb_dim=37 )
def a__ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
@slow
def a__ ( self ) -> Any:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = TFFlaubertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_tf
@require_sentencepiece
@require_tokenizers
class __a( unittest.TestCase ):
"""simple docstring"""
@slow
def a__ ( self ) -> int:
UpperCAmelCase_ : Optional[Any] = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
UpperCAmelCase_ : Dict = tf.convert_to_tensor(
[[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] ,dtype=tf.intaa ,) # "J'aime flaubert !"
UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase_ : Optional[int] = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape ,_SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
UpperCAmelCase_ : List[Any] = tf.convert_to_tensor(
[
[
[-1.8_76_87_73, -1.56_65_55, 0.27_07_24_18],
[-1.6_92_00_38, -0.5_87_35_05, 1.9_32_95_99],
[-2.9_56_39_85, -1.6_99_38_35, 1.7_97_20_52],
]
] ,dtype=tf.floataa ,)
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 300 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=32 , __a=2 , __a=3 , __a=16 , __a=[32, 64, 1_28] , __a=[1, 2, 1] , __a=[2, 2, 4] , __a=2 , __a=2.0 , __a=True , __a=0.0 , __a=0.0 , __a=0.1 , __a="gelu" , __a=False , __a=True , __a=0.02 , __a=1e-5 , __a=True , __a=None , __a=True , __a=10 , __a=8 , __a=["stage1", "stage2"] , __a=[1, 2] , ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = embed_dim
_UpperCamelCase = hidden_sizes
_UpperCamelCase = depths
_UpperCamelCase = num_heads
_UpperCamelCase = window_size
_UpperCamelCase = mlp_ratio
_UpperCamelCase = qkv_bias
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = drop_path_rate
_UpperCamelCase = hidden_act
_UpperCamelCase = use_absolute_embeddings
_UpperCamelCase = patch_norm
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
_UpperCamelCase = is_training
_UpperCamelCase = scope
_UpperCamelCase = use_labels
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = encoder_stride
_UpperCamelCase = out_features
_UpperCamelCase = out_indices
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCAmelCase ( self , __a , __a , __a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = FocalNetModel(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
_UpperCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_UpperCamelCase = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
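        # Illustrative note (ours): each FocalNet stage after the first downsamples 2x per spatial
        # axis (4x fewer tokens) and doubles the channel width, which is what the two expressions
        # above compute.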
def UpperCAmelCase ( self , __a , __a , __a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = FocalNetBackbone(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size, 8, 8])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1])
# verify backbone works with out_features=None
_UpperCamelCase = None
_UpperCamelCase = FocalNetBackbone(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size * 2, 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def UpperCAmelCase ( self , __a , __a , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = FocalNetForMaskedImageModeling(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
_UpperCamelCase = 1
_UpperCamelCase = FocalNetForMaskedImageModeling(__a)
model.to(__a)
model.eval()
_UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def UpperCAmelCase ( self , __a , __a , __a) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.type_sequence_label_size
_UpperCamelCase = FocalNetForImageClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_UpperCamelCase = 1
_UpperCamelCase = FocalNetForImageClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = FocalNetModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
@unittest.skip(reason='''FocalNet does not use inputs_embeds''')
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''')
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_UpperCamelCase = model_class(__a)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear))
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a)
def UpperCAmelCase ( self , __a , __a , __a , __a) -> int:
'''simple docstring'''
_UpperCamelCase = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_UpperCamelCase = model(**self._prepare_for_class(__a , __a))
_UpperCamelCase = outputs.hidden_states
_UpperCamelCase = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths) + 1)
self.assertEqual(len(__a) , __a)
# FocalNet has a different seq_length
_UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
_UpperCamelCase = outputs.reshaped_hidden_states
self.assertEqual(len(__a) , __a)
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = reshaped_hidden_states[0].shape
_UpperCamelCase = (
reshaped_hidden_states[0].view(__a , __a , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_UpperCamelCase = True
self.check_hidden_states_output(__a , __a , __a , __a)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase = True
self.check_hidden_states_output(__a , __a , __a , __a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = 3
_UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_UpperCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_UpperCamelCase = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width))
@slow
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = FocalNetModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = _config_zero_init(__a)
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(config=__a)
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _UpperCAmelCase( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
# TODO update organization
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''') if is_vision_available() else None
@slow
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''').to(__a)
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
_UpperCamelCase = image_processor(images=__a , return_tensors='''pt''').to(__a)
# forward pass
with torch.no_grad():
_UpperCamelCase = model(**__a)
# verify the logits
_UpperCamelCase = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __a)
_UpperCamelCase = torch.tensor([0.2166, -0.4368, 0.2191]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item() , 2_81)
@require_torch
class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ):
lowercase__ = (FocalNetBackbone,) if is_torch_available() else ()
lowercase__ = FocalNetConfig
lowercase__ = False
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = FocalNetModelTester(self)
| 19 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_0 , a=2 , a=3 , a=True , a=True , a=3_2 , a=2 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , ) -> Dict:
lowercase__ : str = parent
lowercase__ : str = batch_size
lowercase__ : Any = image_size
lowercase__ : Dict = patch_size
lowercase__ : Dict = num_channels
lowercase__ : List[str] = is_training
lowercase__ : str = use_labels
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : List[Any] = num_attention_heads
lowercase__ : Tuple = intermediate_size
lowercase__ : int = hidden_act
lowercase__ : Optional[int] = hidden_dropout_prob
lowercase__ : Dict = attention_probs_dropout_prob
lowercase__ : Optional[int] = type_sequence_label_size
lowercase__ : List[str] = initializer_range
lowercase__ : Any = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ : List[str] = (image_size // patch_size) ** 2
lowercase__ : List[str] = num_patches + 1
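        # e.g. with this tester's image_size=30 and patch_size=2: (30 // 2) ** 2 + 1 = 226 tokens.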
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[int] = None
if self.use_labels:
lowercase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : str = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> List[Any]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : str = TFViTModel(config=a )
lowercase__ : Dict = model(a , training=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
lowercase__ : Optional[Any] = self.image_size // 2
lowercase__ : Optional[Any] = pixel_values[:, :, :image_size, :image_size]
lowercase__ : Optional[Any] = model(a , interpolate_pos_encoding=a , training=a )
lowercase__ : Dict = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> int:
lowercase__ : List[Any] = self.type_sequence_label_size
lowercase__ : Optional[int] = TFViTForImageClassification(a )
lowercase__ : Any = model(a , labels=a , training=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
lowercase__ : Optional[int] = self.image_size // 2
lowercase__ : Any = pixel_values[:, :, :image_size, :image_size]
lowercase__ : Optional[int] = model(a , interpolate_pos_encoding=a , training=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ : Dict = 1
lowercase__ : Any = TFViTForImageClassification(a )
lowercase__ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : str = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Tuple = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : List[str] = config_and_inputs
lowercase__ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Any = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
lowerCamelCase__ : int = (
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
lowerCamelCase__ : Any = False
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : int = TFViTModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , tf.keras.layers.Layer ) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(a )
lowercase__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[int] = [*signature.parameters.keys()]
lowercase__ : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[Any] = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(a )
def a_ ( ):
'''simple docstring'''
lowercase__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> List[str]:
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : List[str] = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : Any = prepare_img()
lowercase__ : Optional[Any] = image_processor(images=a , return_tensors='tf' )
# forward pass
lowercase__ : Union[str, Any] = model(**a )
# verify the logits
lowercase__ : Tuple = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : Union[str, Any] = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
tf.debugging.assert_near(outputs.logits[0, :3] , a , atol=1e-4 )
| 599 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Dict = logging.get_logger(__name__)
snake_case_ : List[Any] = {
"""asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class snake_case__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = '''sew'''
def __init__( self : Dict , lowercase : List[Any]=32 , lowercase : Tuple=7_68 , lowercase : str=12 , lowercase : List[str]=12 , lowercase : str=30_72 , lowercase : int=2 , lowercase : str="gelu" , lowercase : str=0.1 , lowercase : Union[str, Any]=0.1 , lowercase : Optional[int]=0.1 , lowercase : Dict=0.0 , lowercase : List[Any]=0.1 , lowercase : Optional[int]=0.1 , lowercase : Union[str, Any]=0.0_2 , lowercase : Tuple=1E-5 , lowercase : int="group" , lowercase : str="gelu" , lowercase : List[Any]=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , lowercase : Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase : Optional[int]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase : Optional[Any]=False , lowercase : int=1_28 , lowercase : Union[str, Any]=16 , lowercase : Any=True , lowercase : int=0.0_5 , lowercase : Any=10 , lowercase : Dict=2 , lowercase : Optional[int]=0.0 , lowercase : Optional[Any]=10 , lowercase : Any=0 , lowercase : int="mean" , lowercase : List[Any]=False , lowercase : Optional[Any]=False , lowercase : List[str]=2_56 , lowercase : List[Any]=0 , lowercase : Optional[int]=1 , lowercase : Any=2 , **lowercase : Optional[int] , ):
'''simple docstring'''
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : Union[str, Any] = feat_extract_norm
UpperCAmelCase : List[str] = feat_extract_activation
UpperCAmelCase : int = list(lowercase )
UpperCAmelCase : List[Any] = list(lowercase )
UpperCAmelCase : Optional[Any] = list(lowercase )
UpperCAmelCase : Union[str, Any] = conv_bias
UpperCAmelCase : List[Any] = num_conv_pos_embeddings
UpperCAmelCase : Union[str, Any] = num_conv_pos_embedding_groups
UpperCAmelCase : Tuple = len(self.conv_dim )
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : str = intermediate_size
UpperCAmelCase : Union[str, Any] = squeeze_factor
UpperCAmelCase : int = hidden_act
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : List[str] = hidden_dropout
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : Union[str, Any] = activation_dropout
UpperCAmelCase : List[str] = feat_proj_dropout
UpperCAmelCase : Optional[Any] = final_dropout
UpperCAmelCase : List[str] = layerdrop
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : Optional[int] = initializer_range
UpperCAmelCase : Any = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase : List[str] = apply_spec_augment
UpperCAmelCase : List[str] = mask_time_prob
UpperCAmelCase : Optional[Any] = mask_time_length
UpperCAmelCase : List[Any] = mask_time_min_masks
UpperCAmelCase : Union[str, Any] = mask_feature_prob
UpperCAmelCase : Dict = mask_feature_length
UpperCAmelCase : Dict = mask_feature_min_masks
# ctc loss
UpperCAmelCase : Tuple = ctc_loss_reduction
UpperCAmelCase : Optional[Any] = ctc_zero_infinity
# sequence classification
UpperCAmelCase : Any = use_weighted_layer_sum
UpperCAmelCase : Tuple = classifier_proj_size
@property
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
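        # e.g. with the default conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) this is
        # 5 * 2**6 = 320 input samples per output frame.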
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 292 |
"""simple docstring"""
def nor_gate ( input_a : int , input_b : int ):
    '''simple docstring'''
    return int(input_a == input_b == 0 )
def main ( ):
'''simple docstring'''
print("Truth Table of NOR Gate:" )
print("| Input 1 | Input 2 | Output |" )
print(F"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 292 | 1 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution ( sides_number : int , dice_number : int ):
    """simple docstring"""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(face_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution ( ):
    """simple docstring"""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
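# Sanity note (ours): Peter's nine 4-sided dice give totals in 9..36 and Colin's six 6-sided
# dice give totals in 6..36; Peter wins on a strictly higher total, and the probability above
# rounds to 0.5731441.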
if __name__ == "__main__":
    print(F'{solution() = }')
| 561 |
'''simple docstring'''
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals ( numerals : str ):
    """simple docstring"""
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def generate_roman_numerals ( num : int ):
    """simple docstring"""
    numerals = ""
    m_count = num // 1000
numerals += m_count * "M"
num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
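# Round-trip example (a sketch):
#     parse_roman_numerals("XLIX") == 49
#     generate_roman_numerals(49) == "XLIX"   # the minimal form of "XXXXIIIIIIIII"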
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Total characters saved by rewriting each numeral in minimal form."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings
if __name__ == "__main__":
    print(f"{solution() = }")

| 561 | 1 |
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Sort ``arr`` with strand sort: repeatedly peel off an increasing sublist
    and merge it into the solution list."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 719 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm: sum(x * x for x in vector)."""
    return np.dot(vector, vector)
class SVC:
    """Support Vector Classifier trained by solving Wolfe's dual problem."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)
    def __linear(self, vectora: ndarray, vectorb: ndarray) -> float:
        """Linear kernel: a plain dot product."""
        return np.dot(vectora, vectorb)

    def __rbf(self, vectora: ndarray, vectorb: ndarray) -> float:
        """RBF kernel: exp(-gamma * ||a - b||^2)."""
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb)))
    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #   and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
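# Usage sketch (toy, linearly separable data; the expected label is
# illustrative, not asserted by the module):
#     xs = [np.array([0.0, 1.0]), np.array([1.0, 1.0]),
#           np.array([1.0, 2.0]), np.array([2.0, 2.0])]
#     ys = np.array([1, 1, -1, -1])
#     svc = SVC(kernel="linear")
#     svc.fit(observations=xs, classes=ys)
#     svc.predict(np.array([0.0, 1.0]))  # expected: 1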
if __name__ == "__main__":
import doctest
doctest.testmod()
| 612 | 0 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece Unigram tokenizer built with the ``tokenizers`` library."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)
    def train(self, files: Union[str, List[str]], vocab_size: int = 8_000, show_progress: bool = True):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()
    def train_from_iterator(self, iterator: Iterator[str], vocab_size: int = 8_000, show_progress: bool = True):
        """Train the model using the given iterator of texts."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()
    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))

| 670 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and return the sum in lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
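# Worked example (a sketch): 1/2 + 1/3 + 1/6 sums to exactly 1, so
#     add_three(1, 2, 1, 3, 1, 6) == (1, 1)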
def solution(order: int = 35) -> int:
    """Sum numerator and denominator of the total over all unique fraction sums
    collected from the four cases n = 1, 2, -1, -2 below."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'{solution() = }')
| 95 | 0 |
from collections.abc import Callable
class Heap:
    """A generic Heap class, parameterized by a key function."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis
        # ordering will be done.
        self.key = key or (lambda x: x)
    def _parent(self, i: int) -> int | None:
        """Return the parent index of the given index, or None for the root."""
        return int((i - 1) / 2) if i > 0 else None
    def _left(self, i: int) -> int | None:
        """Return the left-child index if it is inside the heap, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None
    def _right(self, i: int) -> int | None:
        """Return the right-child index if it is inside the heap, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None
    def _swap(self, i: int, j: int) -> None:
        """Swap items at indexes i and j, keeping the index map in sync."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]
    def _cmp(self, i: int, j: int) -> bool:
        """Compare the items at indexes i and j by their scores."""
        return self.arr[i][1] < self.arr[j][1]
    def _get_valid_parent(self, i: int) -> int:
        """Return the index that should be the parent among i and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent
    def _heapify_up(self, index: int) -> None:
        """Fix the heap invariant upward from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)
    def _heapify_down(self, index: int) -> None:
        """Fix the heap invariant downward from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)
    def update_item(self, item: int, item_value: int) -> None:
        """Update the value of the given item, if it is present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)
    def delete_item(self, item: int) -> None:
        """Delete the given item from the heap, if it is present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)
    def insert_item(self, item: int, item_value: int) -> None:
        """Insert a new item into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)
    def get_top(self) -> tuple | None:
        """Return the top item, or None if the heap is empty."""
        return self.arr[0] if self.size else None
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def _a ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
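# Usage sketch for the Heap above (with the default key it behaves as a
# max-heap, since _get_valid_parent promotes the larger score):
#     h = Heap()
#     h.insert_item(5, 34); h.insert_item(6, 31); h.insert_item(7, 37)
#     h.get_top()      # -> [7, 37]
#     h.extract_top()  # -> [7, 37]; afterwards h.get_top() -> [5, 34]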
| 702 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
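# Worked example (a sketch): gold "a cat", prediction "cat and dog".
# After normalization the token bags are {cat} and {cat, and, dog}, so
# precision = 1/3, recall = 1, and compute_f1 returns 2*(1/3)/(4/3) = 0.5.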
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
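# Note on the sweep above: questions are visited in increasing na_prob order,
# so at each step cur_score is the score obtained by answering only the
# questions seen so far and predicting "" for the rest; the best prefix
# determines the reported threshold.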
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 144 | 0 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 476 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
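# Example (a sketch), using the graphs defined above:
#     bidirectional_dij("E", "F", graph_fwd, graph_bwd)  # -> 3, via E -> G -> F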
| 476 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post renaming of basic JAX keys to pytorch."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer[-1] = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        flax_key_tuple, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(flax_key_tuple)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
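# Size accounting sketch: dtype_byte_size(torch.float32) is 4, so a tensor with
# 1_000_000 elements contributes 4_000_000 bytes toward max_shard_size
# (a string like "10GB" is parsed to an integer byte count by
# convert_file_size_to_int before the comparison above).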
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    """Load the converted checkpoint and run a quick generation (a smoke test)."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 321 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1_024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1_024))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 321 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
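    # Example (a sketch): with eos_token "</s>" at id 1, token_ids_0=[5, 6]
    # becomes [5, 6, 1]; Pegasus adds no prefix token.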
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 308 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2_85, 46, 10, 1_70, 3_82],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [1_26, 32, 2_62, 1_52, 38, 72, 2_87]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
| 457 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ):
pass
def snake_case_ ( self , __a=0 , **__a ):
__lowerCamelCase : Optional[Any] = dict(self.forward_default_kwargs )
__lowerCamelCase : List[Any] = kwargs.pop('num_inference_steps' , _A )
__lowerCamelCase : Optional[Any] = self.dummy_sample
__lowerCamelCase : Tuple = 0.1 * sample
__lowerCamelCase : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCamelCase : List[str] = self.get_scheduler_config()
__lowerCamelCase : List[Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
__lowerCamelCase : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
__lowerCamelCase : Union[str, Any] = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
__lowerCamelCase : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCamelCase : Union[str, Any] = scheduler.step(_A , _A , _A , **_A ).prev_sample
__lowerCamelCase : List[Any] = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case_ ( self , __a=None , **__a ):
if scheduler is None:
__lowerCamelCase : Dict = self.scheduler_classes[0]
__lowerCamelCase : int = self.get_scheduler_config(**_A )
__lowerCamelCase : str = scheduler_class(**_A )
__lowerCamelCase : Dict = self.scheduler_classes[0]
__lowerCamelCase : Optional[int] = self.get_scheduler_config(**_A )
__lowerCamelCase : int = scheduler_class(**_A )
__lowerCamelCase : List[Any] = 10
__lowerCamelCase : Union[str, Any] = self.dummy_model()
__lowerCamelCase : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
__lowerCamelCase : Dict = model(_A , _A )
__lowerCamelCase : Tuple = scheduler.step(_A , _A , _A ).prev_sample
return sample
def snake_case_ ( self ):
__lowerCamelCase : Dict = dict(self.forward_default_kwargs )
__lowerCamelCase : Dict = kwargs.pop('num_inference_steps' , _A )
for scheduler_class in self.scheduler_classes:
__lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
__lowerCamelCase : Any = scheduler_class(**_A )
__lowerCamelCase : Optional[int] = self.dummy_sample
__lowerCamelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A , 'set_timesteps' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A , 'set_timesteps' ):
__lowerCamelCase : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__lowerCamelCase : Any = [residual + 0.2, residual + 0.15, residual + 0.10]
__lowerCamelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
__lowerCamelCase : Optional[int] = scheduler.timesteps[5]
__lowerCamelCase : Any = scheduler.timesteps[6]
__lowerCamelCase : str = scheduler.step(_A , _A , _A , **_A ).prev_sample
__lowerCamelCase : str = scheduler.step(_A , _A , _A , **_A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__lowerCamelCase : Dict = DEISMultistepScheduler(**self.get_scheduler_config() )
__lowerCamelCase : List[str] = self.full_loop(scheduler=_A )
__lowerCamelCase : Optional[int] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
__lowerCamelCase : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__lowerCamelCase : Optional[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
__lowerCamelCase : Dict = UniPCMultistepScheduler.from_config(scheduler.config )
__lowerCamelCase : List[Any] = DEISMultistepScheduler.from_config(scheduler.config )
__lowerCamelCase : Tuple = self.full_loop(scheduler=_A )
__lowerCamelCase : Any = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
def snake_case_ ( self ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def snake_case_ ( self ):
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , algorithm_type='deis' , solver_order=_A , solver_type=_A , )
def snake_case_ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def snake_case_ ( self ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A , solver_type=_A , prediction_type=_A , algorithm_type=_A , )
__lowerCamelCase : Tuple = self.full_loop(
solver_order=_A , solver_type=_A , prediction_type=_A , algorithm_type=_A , )
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def snake_case_ ( self ):
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def snake_case_ ( self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A , time_step=0 )
def snake_case_ ( self ):
__lowerCamelCase : List[str] = self.full_loop()
__lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
def snake_case_ ( self ):
__lowerCamelCase : Tuple = self.full_loop(prediction_type='v_prediction' )
__lowerCamelCase : Any = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.091 ) < 1E-3
def snake_case_ ( self ):
__lowerCamelCase : Tuple = self.scheduler_classes[0]
__lowerCamelCase : Union[str, Any] = self.get_scheduler_config(thresholding=_A , dynamic_thresholding_ratio=0 )
__lowerCamelCase : List[str] = scheduler_class(**_A )
__lowerCamelCase : List[Any] = 10
__lowerCamelCase : Optional[Any] = self.dummy_model()
__lowerCamelCase : Tuple = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
__lowerCamelCase : Dict = model(_A , _A )
__lowerCamelCase : int = scheduler.step(_A , _A , _A ).prev_sample
assert sample.dtype == torch.floataa
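# Hedged sketch (not part of the test class): a minimal denoising loop with
# DEISMultistepScheduler, mirroring `full_loop` above with a stand-in "model".
if __name__ == "__main__":
    scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = 0.1 * sample  # stand-in for a UNet's noise prediction
        sample = scheduler.step(residual, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])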
| 705 |
"""simple docstring"""
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
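# Equivalent vectorized sketch: NumPy broadcasts the subtraction over every
# pixel at once, avoiding the Python double loop above.
def convert_to_negative_fast(img):
    return 255 - img  # elementwise on a uint8 BGR image array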
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative (in place)
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 263 | 0 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class UpperCamelCase ( unittest.TestCase ):
@classmethod
def UpperCamelCase ( cls : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def UpperCamelCase ( cls : Optional[int] ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-model-flax' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-model-flax-org' )
except HTTPError:
pass
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
SCREAMING_SNAKE_CASE = FlaxBertModel(snake_case__ )
model.push_to_hub('test-model-flax' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
SCREAMING_SNAKE_CASE = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case__ , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='test-model-flax' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(snake_case__ , repo_id='test-model-flax' , push_to_hub=snake_case__ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
SCREAMING_SNAKE_CASE = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case__ , 1E-3 , msg=F"""{key} not identical""" )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
SCREAMING_SNAKE_CASE = FlaxBertModel(snake_case__ )
model.push_to_hub('valid_org/test-model-flax-org' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
SCREAMING_SNAKE_CASE = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case__ , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-model-flax-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
snake_case__ , repo_id='valid_org/test-model-flax-org' , push_to_hub=snake_case__ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
SCREAMING_SNAKE_CASE = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case__ , 1E-3 , msg=F"""{key} not identical""" )
def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)

    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
SCREAMING_SNAKE_CASE = FlaxBertModel(snake_case__ )
SCREAMING_SNAKE_CASE = 'bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(snake_case__ , snake_case__ ) )
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.assertTrue(check_models_equal(snake_case__ , snake_case__ ) )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
SCREAMING_SNAKE_CASE = FlaxBertModel(snake_case__ )
SCREAMING_SNAKE_CASE = 'bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(snake_case__ , snake_case__ ) , max_shard_size='10KB' )
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.assertTrue(check_models_equal(snake_case__ , snake_case__ ) )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'bert'
SCREAMING_SNAKE_CASE = 'hf-internal-testing/tiny-random-bert-subfolder'
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.assertIsNotNone(snake_case__ )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'bert'
SCREAMING_SNAKE_CASE = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.assertIsNotNone(snake_case__ )
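# Hedged usage sketch of the subfolder pattern exercised above (assumes flax
# is installed): save a model into a subdirectory, then load it back.
if __name__ == "__main__":
    config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
    model = FlaxBertModel(config)
    with tempfile.TemporaryDirectory() as tmp_dir:
        model.save_pretrained(os.path.join(tmp_dir, "bert"))
        reloaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder="bert")
    assert check_models_equal(model, reloaded)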
| 439 |
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
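if __name__ == "__main__":
    # Example: sentences end up on separate lines (requires nltk's punkt data,
    # downloaded above when nltk is available).
    print(add_newline_to_end_of_each_sentence("Hello world. How are you?"))
    # Hello world.
    # How are you?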
| 439 | 1 |
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # the arithmetic below deliberately cancels the scheduler output so the
        # pipeline deterministically returns a tensor of ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
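# Hedged usage sketch: wire the pipeline above up with a tiny randomly
# initialised UNet and a DDPM scheduler. All sizes here are illustrative.
if __name__ == "__main__":
    from diffusers import DDPMScheduler, UNet2DModel

    unet = UNet2DModel(
        sample_size=8,
        in_channels=3,
        out_channels=3,
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    pipe = CustomPipeline(unet, DDPMScheduler())
    print(pipe().shape)  # a tensor of ones with the random sample's shape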
| 701 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
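# Usage sketch: the defaults above mirror Megatron-BERT's published sizes.
if __name__ == "__main__":
    config = MegatronBertConfig()
    print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)  # 1024 24 16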
| 424 | 0 |
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    r"""
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """
        Processes audio and text input, as well as audio and text targets.
        """
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """
        Collates audio and text inputs, as well as their targets, into a padded batch.
        """
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # pad the spectrogram targets with the mel-bin count as the feature size
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to SpeechT5Tokenizer's [`~PreTrainedTokenizer.batch_decode`]."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to SpeechT5Tokenizer's [`~PreTrainedTokenizer.decode`]."""
        return self.tokenizer.decode(*args, **kwargs)
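# Hedged usage sketch (assumes network access and that the
# "microsoft/speecht5_tts" checkpoint exposes this processor on the Hub):
if __name__ == "__main__":
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    inputs = processor(text="Hello world", return_tensors="pt")
    print(inputs["input_ids"].shape)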
| 282 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")


def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
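    # Hedged sketch: the single-checkpoint path above can also be invoked
    # programmatically (paths here are placeholders):
    #
    #   convert_pt_checkpoint_to_tf(
    #       model_type="bert",
    #       pytorch_checkpoint_path="/path/to/pytorch_model.bin",
    #       config_file="/path/to/config.json",
    #       tf_dump_path="/path/to/tf_model.h5",
    #       compare_with_pt_model=True,
    #   )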
| 282 | 1 |
"""simple docstring"""
def check_bouncy(n: int) -> bool:
    """
    Returns True if the digits of `n` are neither entirely non-decreasing nor
    entirely non-increasing (i.e. `n` is "bouncy").
    >>> check_bouncy(6789)
    False
    >>> check_bouncy(101)
    True
    """
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """
    Returns the least number for which the proportion of bouncy numbers first
    reaches `percent` (Project Euler problem 112).
    >>> solution(50)
    538
    >>> solution(90)
    21780
    """
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
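    # With the default threshold of 99, this prints 1587000: the least number at
    # which the proportion of bouncy numbers first reaches 99% (Project Euler 112).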
| 497 |
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """
    Interleaves the characters of two strings; leftover characters of the
    longer string are appended at the end.
    >>> alternative_string_arrange("ABCD", "XY")
    'AXBYCD'
    """
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
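    # prints "AXBYZ ": characters are interleaved and the leftover "Z" is appended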
| 497 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block: self-attention, optional cross-attention, and a feed-forward layer,
    each with its own normalization.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        timestep=None,
        cross_attention_kwargs=None,
        class_labels=None,
    ):
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    r"""
    A feed-forward layer.
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""
    GELU activation function, with tanh approximation support via `approximate="tanh"`.
    """

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""
    A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""
    The approximate form of the Gaussian Error Linear Unit (GELU). For more details, see section 2
    of https://arxiv.org/abs/1606.08415.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    r"""
    Norm layer modified to incorporate timestep embeddings.
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    r"""
    Norm layer for adaptive layer norm zero (adaLN-Zero).
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    r"""
    GroupNorm layer modified to incorporate timestep embeddings.
    """

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
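# Hedged sketch: FeedForward maps (batch, seq, dim) to the same shape; with the
# default "geglu" activation it first projects to 2 * mult * dim and gates one half.
if __name__ == "__main__":
    ff = FeedForward(dim=32, mult=4, activation_fn="geglu")
    x = torch.randn(2, 10, 32)
    print(ff(x).shape)  # torch.Size([2, 10, 32])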
| 268 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """
    Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also
    set CPU if it is a CPU-only machine.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
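# Hedged usage sketch: write a default config into a temporary directory.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        saved = write_basic_config(mixed_precision="no", save_location=str(Path(tmp_dir) / "default_config.json"))
        print(saved)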
| 268 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCamelCase( SCREAMING_SNAKE_CASE ):
def __init__( self : Dict , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int]=13 , _lowerCamelCase : List[str]=7 , _lowerCamelCase : Tuple=True , _lowerCamelCase : Dict=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Tuple=True , _lowerCamelCase : Optional[int]=99 , _lowerCamelCase : Optional[Any]=32 , _lowerCamelCase : Optional[Any]=5 , _lowerCamelCase : int=4 , _lowerCamelCase : Tuple=37 , _lowerCamelCase : Dict="gelu" , _lowerCamelCase : Tuple=0.1 , _lowerCamelCase : Tuple=0.1 , _lowerCamelCase : str=5_12 , _lowerCamelCase : Optional[int]=16 , _lowerCamelCase : Optional[Any]=2 , _lowerCamelCase : List[Any]=0.02 , _lowerCamelCase : Dict=3 , _lowerCamelCase : Optional[Any]=4 , _lowerCamelCase : int=None , ):
_UpperCAmelCase : List[str] = parent
_UpperCAmelCase : List[Any] = batch_size
_UpperCAmelCase : Tuple = seq_length
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Any = use_input_mask
_UpperCAmelCase : Optional[Any] = use_token_type_ids
_UpperCAmelCase : str = use_labels
_UpperCAmelCase : Optional[Any] = vocab_size
_UpperCAmelCase : str = hidden_size
_UpperCAmelCase : Optional[Any] = num_hidden_layers
_UpperCAmelCase : Union[str, Any] = num_attention_heads
_UpperCAmelCase : List[Any] = intermediate_size
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : List[Any] = hidden_dropout_prob
_UpperCAmelCase : Any = attention_probs_dropout_prob
_UpperCAmelCase : List[str] = max_position_embeddings
_UpperCAmelCase : Dict = type_vocab_size
_UpperCAmelCase : str = type_sequence_label_size
_UpperCAmelCase : Optional[Any] = initializer_range
_UpperCAmelCase : Optional[int] = num_labels
_UpperCAmelCase : str = num_choices
_UpperCAmelCase : List[Any] = scope
def a__ ( self : str ):
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Tuple = None
if self.use_input_mask:
_UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : str = None
_UpperCAmelCase : Dict = None
if self.use_labels:
_UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : str ):
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def a__ ( self : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : Any , _lowerCamelCase : int ):
_UpperCAmelCase : List[Any] = DistilBertModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_UpperCAmelCase : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
_UpperCAmelCase : Union[str, Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : str ):
_UpperCAmelCase : Optional[Any] = DistilBertForMaskedLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_UpperCAmelCase : Optional[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] ):
_UpperCAmelCase : Optional[int] = DistilBertForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_UpperCAmelCase : List[Any] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Any , _lowerCamelCase : List[str] , _lowerCamelCase : Dict , _lowerCamelCase : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ):
_UpperCAmelCase : List[str] = self.num_labels
_UpperCAmelCase : Dict = DistilBertForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_UpperCAmelCase : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Any , _lowerCamelCase : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : int , _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Tuple ):
_UpperCAmelCase : Optional[int] = self.num_labels
_UpperCAmelCase : Tuple = DistilBertForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_UpperCAmelCase : Dict = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Optional[int] , _lowerCamelCase : Any , _lowerCamelCase : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] ):
_UpperCAmelCase : Union[str, Any] = self.num_choices
_UpperCAmelCase : Union[str, Any] = DistilBertForMultipleChoice(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_UpperCAmelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase : Any = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Optional[Any] ):
_UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
((_UpperCAmelCase) ,(_UpperCAmelCase) ,(_UpperCAmelCase) ,(_UpperCAmelCase) ,(_UpperCAmelCase) ,(_UpperCAmelCase)) : List[Any] = config_and_inputs
_UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
"""feature-extraction""": DistilBertModel,
"""fill-mask""": DistilBertForMaskedLM,
"""question-answering""": DistilBertForQuestionAnswering,
"""text-classification""": DistilBertForSequenceClassification,
"""token-classification""": DistilBertForTokenClassification,
"""zero-shot""": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
def a__ ( self : int ):
_UpperCAmelCase : int = DistilBertModelTester(self )
_UpperCAmelCase : List[Any] = ConfigTester(self , config_class=_lowerCamelCase , dim=37 )
def a__ ( self : int ):
self.config_tester.run_common_tests()
def a__ ( self : Tuple ):
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_lowerCamelCase )
def a__ ( self : int ):
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_lowerCamelCase )
def a__ ( self : Optional[int] ):
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_lowerCamelCase )
def a__ ( self : Tuple ):
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_lowerCamelCase )
def a__ ( self : Any ):
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_lowerCamelCase )
def a__ ( self : Any ):
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_lowerCamelCase )
@slow
def a__ ( self : List[Any] ):
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : List[str] = DistilBertModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@slow
@require_torch_gpu
def a__ ( self : List[Any] ):
_UpperCAmelCase ,_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : Optional[Any] = model_class(config=_lowerCamelCase )
_UpperCAmelCase : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
_UpperCAmelCase : List[Any] = torch.jit.trace(
_lowerCamelCase , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_lowerCamelCase , os.path.join(_lowerCamelCase , "traced_model.pt" ) )
_UpperCAmelCase : List[Any] = torch.jit.load(os.path.join(_lowerCamelCase , "traced_model.pt" ) , map_location=_lowerCamelCase )
loaded(inputs_dict["input_ids"].to(_lowerCamelCase ) , inputs_dict["attention_mask"].to(_lowerCamelCase ) )
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
@slow
def a__ ( self : Union[str, Any] ):
_UpperCAmelCase : List[str] = DistilBertModel.from_pretrained("distilbert-base-uncased" )
_UpperCAmelCase : str = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
_UpperCAmelCase : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_UpperCAmelCase : Tuple = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
_UpperCAmelCase : int = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _lowerCamelCase )
_UpperCAmelCase : List[str] = torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1E-4 ) )
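# A hedged, standalone sketch mirroring the integration check above: run the
# same token ids through the pretrained checkpoint and inspect the output
# shape (requires the distilbert-base-uncased weights to be downloadable).
if __name__ == "__main__":
    sketch_model = DistilBertModel.from_pretrained("distilbert-base-uncased")
    sketch_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
    with torch.no_grad():
        sketch_hidden = sketch_model(sketch_ids)[0]
    print(sketch_hidden.shape)  # torch.Size([1, 11, 768])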
| 328 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(self, feature_size=80, sampling_rate=16_000, padding_value=0.0, hop_length=10, win_length=25, win_function="hamming_window", frame_signal_scale=32_768.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel="log", )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, **kwargs, ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
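# A minimal, hedged usage sketch of the extractor above: one second of
# synthetic 16 kHz mono audio (the waveform values are illustrative, not
# real speech; any float waveform at the configured sampling rate works).
if __name__ == "__main__":
    sketch_waveform = np.zeros(16_000, dtype=np.float32)
    sketch_waveform[::200] = 1.0  # sparse clicks so the log-mel features are non-trivial
    sketch_extractor = MCTCTFeatureExtractor()
    sketch_batch = sketch_extractor(
        sketch_waveform, sampling_rate=16_000, padding=True, return_attention_mask=True, return_tensors="np"
    )
    print(sketch_batch["input_features"].shape)  # (1, num_frames, feature_size=80)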
| 328 | 1 |
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    """simple docstring"""

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=125, additional_special_tokens=None, **kwargs, ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens" )
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict (token string -> id)
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text):
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        return ()
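# A minimal, hedged sketch of the byte-level scheme above, independent of the
# class: ids 0..2 are pad/eos/unk, so UTF-8 byte b maps to token id b + 3.
if __name__ == "__main__":
    sample_text = "héllo"
    num_special = 3  # pad, eos, unk
    byte_ids = [b + num_special for b in sample_text.encode("utf-8")]
    assert bytes(i - num_special for i in byte_ids).decode("utf-8") == sample_text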
| 586 | """simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
def __init__( self : Any , UpperCAmelCase : str , UpperCAmelCase : int=13 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : List[Any]=3 , UpperCAmelCase : str=4 , UpperCAmelCase : str=[10, 20, 30, 40] , UpperCAmelCase : str=[2, 2, 3, 2] , UpperCAmelCase : Any=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[Any]=37 , UpperCAmelCase : int="gelu" , UpperCAmelCase : Any=10 , UpperCAmelCase : Dict=0.0_2 , UpperCAmelCase : Dict=["stage2", "stage3", "stage4"] , UpperCAmelCase : List[str]=[2, 3, 4] , UpperCAmelCase : int=None , ):
__lowerCamelCase : Dict = parent
__lowerCamelCase : Tuple = batch_size
__lowerCamelCase : int = image_size
__lowerCamelCase : List[Any] = num_channels
__lowerCamelCase : Any = num_stages
__lowerCamelCase : List[str] = hidden_sizes
__lowerCamelCase : int = depths
__lowerCamelCase : Tuple = is_training
__lowerCamelCase : Optional[int] = use_labels
__lowerCamelCase : List[str] = intermediate_size
__lowerCamelCase : Tuple = hidden_act
__lowerCamelCase : Any = num_labels
__lowerCamelCase : List[Any] = initializer_range
__lowerCamelCase : Any = out_features
__lowerCamelCase : Union[str, Any] = out_indices
__lowerCamelCase : Dict = scope
def lowerCamelCase__ ( self : Optional[int] ):
__lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : Optional[Any] = None
if self.use_labels:
__lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels )
__lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Union[str, Any] ):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any ):
__lowerCamelCase : Any = ConvNextModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCamelCase : Dict = model(UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] ):
__lowerCamelCase : List[Any] = ConvNextForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCamelCase : Tuple = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Any ):
__lowerCamelCase : Any = ConvNextBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCamelCase : Union[str, Any] = model(UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__lowerCamelCase : str = None
__lowerCamelCase : int = ConvNextBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCamelCase : Any = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : Dict = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = config_and_inputs
__lowerCamelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase : Optional[Any] = ConvNextModelTester(self )
__lowerCamelCase : Any = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def lowerCamelCase__ ( self : str ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ ( self : Optional[int] ):
return
@unittest.skip(reason="ConvNext does not use inputs_embeds" )
def lowerCamelCase__ ( self : Union[str, Any] ):
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings" )
def lowerCamelCase__ ( self : Optional[int] ):
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking" )
def lowerCamelCase__ ( self : Union[str, Any] ):
pass
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase , __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : List[Any] = model_class(UpperCAmelCase )
__lowerCamelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Any = [*signature.parameters.keys()]
__lowerCamelCase : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
def lowerCamelCase__ ( self : str ):
def check_hidden_states_output(UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] ):
__lowerCamelCase : int = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowerCamelCase : Tuple = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
__lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCamelCase : Tuple = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[Any] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : str = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@slow
def lowerCamelCase__ ( self : List[Any] ):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Tuple = ConvNextModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
@cached_property
def lowerCamelCase__ ( self : Dict ):
return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : str = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(UpperCAmelCase )
__lowerCamelCase : Dict = self.default_image_processor
__lowerCamelCase : str = prepare_img()
__lowerCamelCase : List[str] = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
__lowerCamelCase : Union[str, Any] = model(**UpperCAmelCase )
# verify the logits
__lowerCamelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
__lowerCamelCase : Tuple = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 ) )
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : int = ConvNextModelTester(self ) | 646 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__lowercase = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
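# Hedged migration sketch: new code should construct the image processor
# directly; the deprecated alias above only adds the FutureWarning.
if __name__ == "__main__":
    preferred = PoolFormerImageProcessor()
    legacy = PoolFormerFeatureExtractor()  # still works, but warns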
| 717 | """simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
"""simple docstring"""
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any]=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : List[str]=99 , UpperCamelCase__ : Dict=32 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : int=37 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Optional[int]=20 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Tuple=4 , ) -> str:
'''simple docstring'''
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =seq_length
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =eos_token_id
__UpperCamelCase =pad_token_id
__UpperCamelCase =bos_token_id
__UpperCamelCase =attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
__UpperCamelCase =self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
__UpperCamelCase =(
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__UpperCamelCase =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase =tf.concat([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
__UpperCamelCase =prepare_led_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tf.concat(
[tf.zeros_like(UpperCamelCase__ )[:, :-1], tf.ones_like(UpperCamelCase__ )[:, -1:]] , axis=-1 , )
__UpperCamelCase =global_attention_mask
return config, inputs_dict
def UpperCAmelCase_ ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ) -> Any:
'''simple docstring'''
__UpperCamelCase =TFLEDModel(config=UpperCamelCase__ ).get_decoder()
__UpperCamelCase =inputs_dict['''input_ids''']
__UpperCamelCase =input_ids[:1, :]
__UpperCamelCase =inputs_dict['''attention_mask'''][:1, :]
__UpperCamelCase =1
# first forward pass
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ )
__UpperCamelCase , __UpperCamelCase =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCamelCase =ids_tensor((self.batch_size, 3) , config.vocab_size )
        __UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
__UpperCamelCase =tf.concat([input_ids, next_tokens] , axis=-1 )
__UpperCamelCase =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__UpperCamelCase =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__UpperCamelCase =output_from_no_past[:, -3:, random_slice_idx]
__UpperCamelCase =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-3 )
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def UpperCAmelCase_ ( self : int ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase =TFLEDModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=UpperCamelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Dict ) -> Any:
'''simple docstring'''
__UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =tf.zeros_like(inputs_dict['''attention_mask'''] )
__UpperCamelCase =2
__UpperCamelCase =tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
__UpperCamelCase =True
__UpperCamelCase =self.model_tester.seq_length
__UpperCamelCase =self.model_tester.encoder_seq_length
def check_decoder_attentions_output(UpperCamelCase__ : Tuple ):
__UpperCamelCase =outputs.decoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(UpperCamelCase__ : Dict ):
__UpperCamelCase =[t.numpy() for t in outputs.encoder_attentions]
__UpperCamelCase =[t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =model_class(UpperCamelCase__ )
__UpperCamelCase =model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
__UpperCamelCase =len(UpperCamelCase__ )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
if self.is_encoder_decoder:
__UpperCamelCase =model_class(UpperCamelCase__ )
__UpperCamelCase =model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_decoder_attentions_output(UpperCamelCase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__UpperCamelCase =True
__UpperCamelCase =model_class(UpperCamelCase__ )
__UpperCamelCase =model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
# Check attention is always last and order is fine
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =model_class(UpperCamelCase__ )
__UpperCamelCase =model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) )
self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def UpperCAmelCase_ ( self : Dict ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
pass
def _long_tensor(tok_lst):
    """simple docstring"""
    return tf.constant(tok_lst, dtype=tf.int32)
__lowercase = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase =TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
__UpperCamelCase =_long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__UpperCamelCase =_long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__UpperCamelCase =prepare_led_inputs_dict(model.config , UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =model(**UpperCamelCase__ )[0]
__UpperCamelCase =(1, 1024, 768)
self.assertEqual(output.shape , UpperCamelCase__ )
# change to expected output here
__UpperCamelCase =tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1E-3 )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase =TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
__UpperCamelCase =_long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__UpperCamelCase =_long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__UpperCamelCase =prepare_led_inputs_dict(model.config , UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =model(**UpperCamelCase__ )[0]
__UpperCamelCase =(1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , UpperCamelCase__ )
# change to expected output here
__UpperCamelCase =tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1E-3 , rtol=1E-3 )
| 296 | 0 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """simple docstring"""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 565 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : List[str]=7 , _lowerCAmelCase : List[Any]=4_0_0 , _lowerCAmelCase : Optional[Any]=2_0_0_0 , _lowerCAmelCase : Tuple=1_0 , _lowerCAmelCase : Optional[int]=1_6_0 , _lowerCAmelCase : List[Any]=8 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Dict=4_0_0_0 , _lowerCAmelCase : str=False , _lowerCAmelCase : List[str]=True , ) -> str:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = min_seq_length
snake_case_ = max_seq_length
snake_case_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case_ = padding_value
snake_case_ = sampling_rate
snake_case_ = return_attention_mask
snake_case_ = do_normalize
snake_case_ = feature_size
snake_case_ = chunk_length
snake_case_ = hop_length
def lowerCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCAmelCase__ ( self : int , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : List[Any]=False ) -> int:
"""simple docstring"""
def _flatten(_lowerCAmelCase : Union[str, Any] ):
return list(itertools.chain(*_lowerCAmelCase ) )
if equal_length:
snake_case_ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case_ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case_ = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
"""simple docstring"""
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
def lowerCAmelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = WhisperFeatureExtractionTester(self )
def lowerCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = feat_extract_first.save_pretrained(_lowerCAmelCase )[0]
check_json_file_has_correct_format(_lowerCAmelCase )
snake_case_ = self.feature_extraction_class.from_pretrained(_lowerCAmelCase )
snake_case_ = feat_extract_first.to_dict()
snake_case_ = feat_extract_second.to_dict()
snake_case_ = feat_extract_first.mel_filters
snake_case_ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = os.path.join(_lowerCAmelCase , "feat_extract.json" )
feat_extract_first.to_json_file(_lowerCAmelCase )
snake_case_ = self.feature_extraction_class.from_json_file(_lowerCAmelCase )
snake_case_ = feat_extract_first.to_dict()
snake_case_ = feat_extract_second.to_dict()
snake_case_ = feat_extract_first.mel_filters
snake_case_ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
# Tests that all call wrap to encode_plus and batch_encode_plus
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case_ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test feature size
snake_case_ = feature_extractor(_lowerCAmelCase , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
snake_case_ = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
snake_case_ = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# Test batched
snake_case_ = feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_features
snake_case_ = feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
snake_case_ = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
snake_case_ = np.asarray(_lowerCAmelCase )
snake_case_ = feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_features
snake_case_ = feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# Test truncation required
snake_case_ = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
snake_case_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
snake_case_ = [x[: feature_extractor.n_samples] for x in speech_inputs]
snake_case_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs_truncated]
snake_case_ = feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_features
snake_case_ = feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
def lowerCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
import torch
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        snake_case_ = np.random.rand(1_0_0 , 3_2 ).astype(np.float32 )
snake_case_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case_ = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
snake_case_ = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def lowerCAmelCase__ ( self : List[Any] , _lowerCAmelCase : Dict ) -> int:
"""simple docstring"""
snake_case_ = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
snake_case_ = ds.sort("id" ).select(range(_lowerCAmelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowerCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
# fmt: off
snake_case_ = torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
snake_case_ = self._load_datasamples(1 )
snake_case_ = WhisperFeatureExtractor()
snake_case_ = feature_extractor(_lowerCAmelCase , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , _lowerCAmelCase , atol=1e-4 ) )
def lowerCAmelCase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = self._load_datasamples(1 )[0]
snake_case_ = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
snake_case_ = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_lowerCAmelCase )[0]
self.assertTrue(np.all(np.mean(_lowerCAmelCase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowerCAmelCase ) - 1 ) < 1e-3 ) )
| 283 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """simple docstring"""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    """simple docstring"""
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """simple docstring"""
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
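# A hedged check of the intended result (Project Euler 33): the four
# non-trivial fractions are 16/64, 19/95, 26/65 and 49/98, whose product
# reduces to 1/100, so solution() should return 100.
if __name__ == "__main__":
    assert sorted(fraction_list(2)) == ["16/64", "19/95", "26/65", "49/98"]
    assert solution() == 100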
| 701 | '''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
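# A minimal, hedged usage sketch wiring the processor above from its two
# components; the bert-base-uncased vocabulary is illustrative (any
# BertTokenizer works) and must be downloadable for this to run.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import BertTokenizerFast, BlipImageProcessor

    sketch_processor = BlipProcessor(BlipImageProcessor(), BertTokenizerFast.from_pretrained("bert-base-uncased"))
    sketch_image = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))
    sketch_encoding = sketch_processor(images=sketch_image, text="a photo", return_tensors="np")
    print(sorted(sketch_encoding.keys()))  # attention_mask, input_ids, pixel_values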
| 389 | 0 |