"""Quine-McCluskey minimization of boolean functions."""
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two binary strings, replacing the differing position with "_";
    return False if they differ in more than one position."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Collect the prime implicants from a list of binary strings."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to a binary string of no_of_variable digits."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True if the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # a column covered by exactly one row marks that row as essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily pick the row covering the most remaining columns
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the prime implicant coverage chart."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
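
# Hedged usage sketch (added for illustration, not part of the original
# module): mirrors main() without reading stdin. The minterm value below is
# illustrative only; the module deliberately operates on float minterms.
def _example_minimize() -> None:
    binary = decimal_to_binary(3, [1.5])
    prime_implicants = check(binary)
    chart = prime_implicant_chart(prime_implicants, binary)
    print(selection(chart, prime_implicants))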
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'longformer'
def __init__( self , lowercase = 512 , lowercase = 2 , lowercase = 1 , lowercase = 0 , lowercase = 2 , lowercase = 30_522 , lowercase = 768 , lowercase = 12 , lowercase = 12 , lowercase = 3_072 , lowercase = "gelu" , lowercase = 0.1 , lowercase = 0.1 , lowercase = 512 , lowercase = 2 , lowercase = 0.02 , lowercase = 1e-12 , lowercase = False , **lowercase , ) -> Optional[int]:
super().__init__(pad_token_id=lowercase , **lowercase )
lowerCAmelCase = attention_window
lowerCAmelCase = sep_token_id
lowerCAmelCase = bos_token_id
lowerCAmelCase = eos_token_id
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = onnx_export
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase , lowercase = "default" , lowercase = None ) -> Tuple:
super().__init__(lowercase , lowercase , lowercase )
lowerCAmelCase = True
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""global_attention_mask""", dynamic_axis),
] )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
lowerCAmelCase = super().outputs
if self.task == "default":
lowerCAmelCase = {0: """batch"""}
return outputs
@property
def _snake_case ( self ) -> float:
return 1e-4
@property
def _snake_case ( self ) -> int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def _snake_case ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ) -> Mapping[str, Any]:
lowerCAmelCase = super().generate_dummy_inputs(
preprocessor=lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowerCAmelCase = torch.zeros_like(inputs["""input_ids"""] )
# make every second token global
lowerCAmelCase = 1
return inputs
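
# Hedged usage sketch (added for illustration, not part of the original file;
# assumes an installed `transformers` package that exports LongformerConfig):
def _example_config() -> None:
    from transformers import LongformerConfig

    # one attention window per layer is the common per-layer configuration
    config = LongformerConfig(attention_window=[256] * 12)
    print(config.attention_window, config.num_hidden_layers)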
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, require_torch_tpu


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
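
# Hedged usage sketch (added for illustration, not part of the original file;
# assumes an installed `transformers` package that exports XmodConfig):
def _example_config() -> None:
    from transformers import XmodConfig

    config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
    print(config.languages, config.default_language)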
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {"""vocab_file""": """spm_char.model"""}
_A = {
"""vocab_file""": {
"""microsoft/speecht5_asr""": """https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model""",
"""microsoft/speecht5_tts""": """https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model""",
"""microsoft/speecht5_vc""": """https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model""",
}
}
_A = {
"""microsoft/speecht5_asr""": 10_24,
"""microsoft/speecht5_tts""": 10_24,
"""microsoft/speecht5_vc""": 10_24,
}
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__(self , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase = None , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
UpperCAmelCase__ : Optional[Any] = vocab_file
UpperCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
@property
def _a (self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.__dict__.copy()
UpperCAmelCase__ : Optional[Any] = None
return state
def __setstate__(self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase__ : Any = {}
UpperCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
return self.sp_model.piece_to_id(_lowerCamelCase )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = self.sp_model.IdToPiece(_lowerCamelCase )
return token
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : int = []
UpperCAmelCase__ : List[str] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCamelCase ) + token
UpperCAmelCase__ : Tuple = []
else:
current_sub_tokens.append(_lowerCamelCase )
out_string += self.sp_model.decode(_lowerCamelCase )
return out_string.strip()
def _a (self , _lowerCamelCase , _lowerCamelCase=None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _a (self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
UpperCAmelCase__ : Tuple = [1]
if token_ids_a is None:
return ([0] * len(_lowerCamelCase )) + suffix_ones
return ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def _a (self , _lowerCamelCase , _lowerCamelCase = None ):
"""simple docstring"""
if not os.path.isdir(_lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase__ : Optional[Any] = os.path.join(
_lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , """wb""" ) as fi:
UpperCAmelCase__ : int = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
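
# Hedged usage sketch (added for illustration, not part of the original file;
# assumes `transformers` and `sentencepiece` are installed and the hub
# checkpoint is reachable):
def _example_tokenize() -> None:
    from transformers import SpeechT5Tokenizer

    tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
    ids = tokenizer("hello world").input_ids
    print(tokenizer.decode(ids))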
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
_A = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 10_00,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
_A = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 10_00,
"""block_out_channels""": [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
_A = {
"""sample_size""": 2_56,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
_A = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
_A = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
_A = {
"""num_train_timesteps""": 1_51,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
def a__ ( lowerCAmelCase ) -> Tuple:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""" )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ) -> List[str]:
UpperCAmelCase__ : int = checkpoint[F"""{old_prefix}.in_layers.0.weight"""]
UpperCAmelCase__ : Optional[int] = checkpoint[F"""{old_prefix}.in_layers.0.bias"""]
UpperCAmelCase__ : Optional[Any] = checkpoint[F"""{old_prefix}.in_layers.2.weight"""]
UpperCAmelCase__ : str = checkpoint[F"""{old_prefix}.in_layers.2.bias"""]
UpperCAmelCase__ : Any = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""]
UpperCAmelCase__ : Optional[int] = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""]
UpperCAmelCase__ : str = checkpoint[F"""{old_prefix}.out_layers.0.weight"""]
UpperCAmelCase__ : List[Any] = checkpoint[F"""{old_prefix}.out_layers.0.bias"""]
UpperCAmelCase__ : Dict = checkpoint[F"""{old_prefix}.out_layers.3.weight"""]
UpperCAmelCase__ : Union[str, Any] = checkpoint[F"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
UpperCAmelCase__ : int = checkpoint[F"""{old_prefix}.skip_connection.weight"""]
UpperCAmelCase__ : Dict = checkpoint[F"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None ) -> Optional[int]:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
UpperCAmelCase__ : List[Any] = checkpoint[F"""{old_prefix}.norm.weight"""]
UpperCAmelCase__ : str = checkpoint[F"""{old_prefix}.norm.bias"""]
UpperCAmelCase__ : Union[str, Any] = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Optional[Any] = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Any = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : int = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Dict = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : int = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Any = (
checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
UpperCAmelCase__ : str = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> str:
UpperCAmelCase__ : Optional[Any] = torch.load(lowerCAmelCase , map_location="""cpu""" )
UpperCAmelCase__ : List[Any] = {}
UpperCAmelCase__ : List[Any] = checkpoint["""time_embed.0.weight"""]
UpperCAmelCase__ : str = checkpoint["""time_embed.0.bias"""]
UpperCAmelCase__ : List[str] = checkpoint["""time_embed.2.weight"""]
UpperCAmelCase__ : Dict = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
UpperCAmelCase__ : Dict = checkpoint["""label_emb.weight"""]
UpperCAmelCase__ : str = checkpoint["""input_blocks.0.0.weight"""]
UpperCAmelCase__ : List[str] = checkpoint["""input_blocks.0.0.bias"""]
UpperCAmelCase__ : List[str] = unet_config["""down_block_types"""]
UpperCAmelCase__ : Tuple = unet_config["""layers_per_block"""]
UpperCAmelCase__ : int = unet_config["""attention_head_dim"""]
UpperCAmelCase__ : Union[str, Any] = unet_config["""block_out_channels"""]
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : Union[str, Any] = channels_list[0]
for i, layer_type in enumerate(lowerCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = channels_list[i]
UpperCAmelCase__ : int = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(lowerCAmelCase ):
UpperCAmelCase__ : Tuple = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase__ : List[Any] = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase__ : Dict = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase__ : Optional[Any] = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , has_skip=lowerCAmelCase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(lowerCAmelCase ):
UpperCAmelCase__ : Any = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase__ : Optional[Any] = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase__ : int = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase__ : Union[str, Any] = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , has_skip=lowerCAmelCase )
UpperCAmelCase__ : Dict = F"""down_blocks.{i}.attentions.{j}"""
UpperCAmelCase__ : int = F"""input_blocks.{current_layer}.1"""
UpperCAmelCase__ : Union[str, Any] = convert_attention(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
current_layer += 1
if i != len(lowerCAmelCase ) - 1:
UpperCAmelCase__ : Any = F"""down_blocks.{i}.downsamplers.0"""
UpperCAmelCase__ : List[str] = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase__ : Tuple = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
current_layer += 1
UpperCAmelCase__ : Tuple = current_channels
# hardcoded the mid-block for now
UpperCAmelCase__ : List[Any] = """mid_block.resnets.0"""
UpperCAmelCase__ : str = """middle_block.0"""
UpperCAmelCase__ : List[str] = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[str] = """mid_block.attentions.0"""
UpperCAmelCase__ : Any = """middle_block.1"""
UpperCAmelCase__ : Optional[int] = convert_attention(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[Any] = """mid_block.resnets.1"""
UpperCAmelCase__ : Tuple = """middle_block.2"""
UpperCAmelCase__ : Union[str, Any] = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Any = 0
UpperCAmelCase__ : Dict = unet_config["""up_block_types"""]
for i, layer_type in enumerate(lowerCAmelCase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase__ : Tuple = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase__ : Optional[Any] = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase__ : Dict = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , has_skip=lowerCAmelCase )
current_layer += 1
if i != len(lowerCAmelCase ) - 1:
UpperCAmelCase__ : List[str] = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase__ : Any = F"""output_blocks.{current_layer-1}.1"""
UpperCAmelCase__ : Union[str, Any] = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase__ : List[str] = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase__ : Dict = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase__ : Any = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , has_skip=lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = F"""up_blocks.{i}.attentions.{j}"""
UpperCAmelCase__ : List[str] = F"""output_blocks.{current_layer}.1"""
UpperCAmelCase__ : Dict = convert_attention(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
current_layer += 1
if i != len(lowerCAmelCase ) - 1:
UpperCAmelCase__ : int = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase__ : int = F"""output_blocks.{current_layer-1}.2"""
UpperCAmelCase__ : Union[str, Any] = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = checkpoint["""out.0.weight"""]
UpperCAmelCase__ : List[Any] = checkpoint["""out.0.bias"""]
UpperCAmelCase__ : Tuple = checkpoint["""out.2.weight"""]
UpperCAmelCase__ : Optional[Any] = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
_A = parser.parse_args()
_A = strabool(args.class_cond)
_A = os.path.basename(args.unet_path)
print(f'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
_A = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_A = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_A = TEST_UNET_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
_A = None
_A = con_pt_to_diffuser(args.unet_path, unet_config)
_A = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_A = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_A = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_A = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
_A = CMStochasticIterativeScheduler(**scheduler_config)
_A = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
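
# Hedged invocation sketch (added for illustration, not part of the original
# script; the file name and checkpoint name below are illustrative only):
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt \
#       --dump_path ./converted-consistency-model \
#       --class_cond True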
from typing import List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig


UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"


class UperNetConvModule(nn.Module):
    """A convolution block bundling conv, batch norm and ReLU activation."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM), pooling the input at several scales."""

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs


class UperNetHead(nn.Module):
    """Unified Perceptual Parsing decode head: PSP module plus an FPN."""

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output


class UperNetFCNHead(nn.Module):
    """Fully convolutional auxiliary head applied to one backbone stage."""

    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output


class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value


UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
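
# Hedged usage sketch (added for illustration, not part of the original file;
# assumes `transformers` and `torch` are installed and the hub checkpoint is
# reachable):
def _example_segmentation() -> None:
    import torch
    from transformers import UperNetForSemanticSegmentation

    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    pixel_values = torch.randn(1, 3, 512, 512)
    with torch.no_grad():
        logits = model(pixel_values).logits  # (batch, num_labels, height, width)
    print(logits.shape)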
"""Scheduler utilities shared by all diffusers schedulers."""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union

import torch

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


# NOTE: the enum member names below follow the upstream diffusers file this
# module mirrors; the obfuscated source preserved only the integer values.
class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
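
# Hedged usage sketch (added for illustration, not part of the original file;
# assumes `diffusers` is installed and the hub repo is reachable with a
# `scheduler` subfolder):
def _example_load_scheduler() -> None:
    from diffusers import DDPMScheduler

    scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256", subfolder="scheduler")
    print([cls.__name__ for cls in scheduler.compatibles])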
"""simple docstring"""
from math import isclose, sqrt
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> tuple[float, float, float]:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = point_y / 4 / point_x
lowerCAmelCase_ :Dict = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
lowerCAmelCase_ :Union[str, Any] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
lowerCAmelCase_ :str = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
lowerCAmelCase_ :Tuple = outgoing_gradient**2 + 4
lowerCAmelCase_ :Tuple = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
lowerCAmelCase_ :str = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0
lowerCAmelCase_ :Optional[Any] = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
lowerCAmelCase_ :Optional[int] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
lowerCAmelCase_ :List[Any] = x_minus if isclose(lowercase__ , lowercase__ ) else x_plus
lowerCAmelCase_ :List[str] = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def _snake_case ( lowercase__ : float = 1.4 , lowercase__ : float = -9.6 ) -> int:
'''simple docstring'''
lowerCAmelCase_ :int = 0
lowerCAmelCase_ :float = first_x_coord
lowerCAmelCase_ :float = first_y_coord
lowerCAmelCase_ :float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_point(lowercase__ , lowercase__ , lowercase__ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
import json
import os
import unittest

from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
    VOCAB_FILES_NAMES,
    GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagged(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        x_token_1 = tokenizer.encode(prefix_text + input_text)
        x_token_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        x_token_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        x_rebuilt_1 = tokenizer.decode(x_token_1)
        x_rebuilt_2 = tokenizer.decode(x_token_2)
        x_rebuilt_3 = tokenizer.decode(x_token_3)
        self.assertEqual(x_rebuilt_1, expected_text)
        self.assertEqual(x_rebuilt_2, expected_text)
        self.assertEqual(x_rebuilt_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_token_type_ids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attention_mask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_token_type_ids)
        self.assertListEqual(x_token.attention_mask, expected_attention_mask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_token_type_ids)
        self.assertListEqual(x_token_2.attention_mask, expected_attention_mask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Compute the gamma function by numerically integrating
    x^(num-1) * e^(-x) from 0 to infinity."""
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
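
# Hedged usage sketch (added for illustration, not part of the original
# module): for positive integers Γ(n) = (n - 1)!, so the numerical integral
# should come out close to 24 here.
def _example_gamma() -> None:
    print(gamma(5.0))  # ≈ 24.0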
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # By default, resize the shortest edge to 18 pixels while capping the longest edge at 1333.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height and width the image processor is expected to return."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
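    # Worked example of the resizing rule above, assuming the integration-test
    # checkpoint's size of {"shortest_edge": 800, "longest_edge": 1333}: a
    # 640x480 (w x h) image has w > h, so h becomes 800 and w becomes
    # int(800 * 640 / 480) = 1066, matching the (800, 1066) shapes asserted
    # in the slow tests below.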
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        # Keyword arguments passed to from_dict should override the serialized values.
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings: one with the default config, one with resizing,
        # normalization and rescaling disabled so that only padding is applied.
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 306 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 306 | 1 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTest(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 357 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
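# Example usage (a sketch, not part of the original module): each auto class
# dispatches on the checkpoint's config type, e.g.
#     model = FlaxAutoModel.from_pretrained("bert-base-cased")
# instantiates a FlaxBertModel according to FLAX_MODEL_MAPPING above.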
| 106 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # Guarantee every row attends to a non-empty prefix (index expressions
            # reconstructed from the standard tester pattern).
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    # The three flag names below are reconstructed from the upstream test suite.
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
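    # Defer the heavy submodule imports: replacing this module with a _LazyModule
    # means names such as DPTModel are only imported on first attribute access,
    # which keeps importing the top-level package fast.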
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 | 0 |
"""simple docstring"""
import requests
lowercase__ = """""" # <-- Put your OpenWeatherMap appid here!
lowercase__ = """https://api.openweathermap.org/data/2.5/"""
def __lowerCamelCase ( __UpperCamelCase = "Chicago" , __UpperCamelCase = APPID ) -> dict:
"""simple docstring"""
return requests.get(URL_BASE + "weather" , params=locals() ).json()
def __lowerCamelCase ( __UpperCamelCase = "Kolkata, India" , __UpperCamelCase = APPID ) -> dict:
"""simple docstring"""
return requests.get(URL_BASE + "forecast" , params=locals() ).json()
def __lowerCamelCase ( __UpperCamelCase = 55.68 , __UpperCamelCase = 12.57 , __UpperCamelCase = APPID ) -> dict:
"""simple docstring"""
return requests.get(URL_BASE + "onecall" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 161 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 161 | 1 |
import numpy as np
class Cell:
    """A cell in the grid world, tracking its position, parent and A* scores."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0  # cost from start
        self.h = 0  # heuristic estimate to goal
        self.f = 0  # g + h

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return all in-bounds neighbours (including diagonals) of ``cell``."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """Run A* from ``start`` to ``goal`` and return the path as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        # Expand the open cell with the smallest f score.
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))

        if current == goal:
            break

        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g

            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
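# The heuristic above is the *squared* Euclidean distance to the goal. Since it
# can overestimate the true remaining cost, it is not admissible in the strict
# A* sense, so the returned path is not guaranteed to be the shortest one.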
if __name__ == "__main__":
    world = Gridworld()

    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)

    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 176 |
import os
def solution() -> int:
    """Sum the alphabetical-value scores of all names in p022_names.txt,
    each weighted by its 1-based position in the sorted list."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
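# Example from Project Euler problem 22: "COLIN" has an alphabetical value of
# 3 + 15 + 12 + 9 + 14 = 53 and sits at position 938 in the sorted list, so it
# contributes 938 * 53 = 49714 to the total.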
if __name__ == "__main__":
print(solution())
| 176 | 1 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    # Entry point for torch.hub; mirrors AutoConfig.from_pretrained.
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 284 |
def wave(txt: str) -> list:
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
if txt[a].isalpha()
]
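# For example, wave('cat') returns ['Cat', 'cAt', 'caT']: each alphabetic
# character is uppercased in turn while the rest of the string is unchanged.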
if __name__ == "__main__":
    __import__('doctest').testmod()
| 284 | 1 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control(self):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 285 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowerCAmelCase__ : str = set()
# Replace all the whitespace in our sentence
lowerCAmelCase__ : Tuple = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(UpperCamelCase ) == 26
def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowerCAmelCase__ : Any = [False] * 26
for char in input_str:
if char.islower():
lowerCAmelCase__ : Optional[Any] = True
elif char.isupper():
lowerCAmelCase__ : Any = True
return all(UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
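# The three variants trade clarity for speed: the set comprehension in
# is_pangram_fastest does a single pass with no per-character branching at the
# Python level, which is why it wins in the timings printed by benchmark().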
def benchmark() -> None:
    """Benchmark the three pangram checks on the default sentence."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 37 | 0 |
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down (memoized) edit distance between word1 and word2."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
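# With functools.cache each (index1, index2) pair is solved once, so this
# evaluates the classic Levenshtein recurrence top-down in
# O(len(word1) * len(word2)) time and space.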
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name
def convert_state_dict(orig_state_dict: dict, config: ViTMAEConfig) -> dict:
    """Rename keys and split each fused qkv matrix into separate query/key/value tensors."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url: str, pytorch_dump_folder_path: str) -> None:
    """Convert an original ViT-MAE checkpoint into the Hugging Face format and verify the logits."""
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
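# Example invocation (hypothetical script name and output directory):
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base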
| 231 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
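# Usage sketch (assuming this file is transformers/models/xglm/__init__.py): the
# _LazyModule defers the heavy framework imports until an attribute is first accessed, e.g.
#   from transformers import XGLMTokenizer, XGLMForCausalLM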
| 19 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18_215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        h = self.encode(sample).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
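# Usage sketch (an added illustration, not part of the original file; assumes the public
# `diffusers` package exposes an equivalent VQModel):
#   import torch
#   from diffusers import VQModel
#   vq = VQModel(block_out_channels=(32,), norm_num_groups=32, num_vq_embeddings=16)
#   reconstruction = vq(torch.randn(1, 3, 32, 32)).sample  # same shape as the input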
| 144 | 0 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
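# Shape sketch (an added illustration; Flax modules here use NHWC layout):
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#   params = block.init(jax.random.PRNGKey(0), jnp.ones((1, 8, 8, 32)), jnp.ones((1, 128)))
#   out = block.apply(params, jnp.ones((1, 8, 8, 32)), jnp.ones((1, 128)))  # (1, 8, 8, 64)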
| 371 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = 0
def lowerCAmelCase ( self : str )-> Any:
snake_case = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Tuple )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = CLIPConfig()
# Create a dummy config file with image_proceesor_type
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case = AutoImageProcessor.from_pretrained(__snake_case ).to_dict()
config_dict.pop("""image_processor_type""" )
snake_case = CLIPImageProcessor(**__snake_case )
# save in new folder
model_config.save_pretrained(__snake_case )
config.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
# make sure private variable is not incorrectly saved
snake_case = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Dict:
with self.assertRaisesRegex(
__snake_case , """clip-base is not a local folder and is not a valid model identifier""" ):
snake_case = AutoImageProcessor.from_pretrained("""clip-base""" )
def lowerCAmelCase ( self : Tuple )-> int:
with self.assertRaisesRegex(
__snake_case , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
snake_case = AutoImageProcessor.from_pretrained(__snake_case , revision="""aaaaaa""" )
def lowerCAmelCase ( self : str )-> Union[str, Any]:
with self.assertRaisesRegex(
__snake_case , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase ( self : List[str] )-> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case , trust_remote_code=__snake_case )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def lowerCAmelCase ( self : List[str] )-> Dict:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoImageProcessor.register(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Dict )-> Optional[int]:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = True
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(__snake_case , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 3 | 0 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan distance between two points in n-dimensional space."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """Raise TypeError/ValueError unless `point` is a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Same as `manhattan_distance`, written as a single expression."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
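    # Added numeric check: |1 - 2| + |1 - 2| = 2
    assert manhattan_distance([1, 1], [2, 2]) == 2.0
    assert manhattan_distance_one_liner([1, 1], [2, 2]) == 2.0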
| 124 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_UpperCAmelCase : int = None
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Optional[int] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : List[Any] = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : List[str] = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
_UpperCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = ["""input_ids""", """attention_mask"""]
UpperCAmelCase__ = NllbTokenizer
UpperCAmelCase__ = []
UpperCAmelCase__ = []
def __init__( self : Tuple , UpperCAmelCase : int=None , UpperCAmelCase : Any=None , UpperCAmelCase : str="<s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : str="</s>" , UpperCAmelCase : Tuple="<s>" , UpperCAmelCase : Optional[Any]="<unk>" , UpperCAmelCase : List[str]="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : Tuple=None , UpperCAmelCase : int=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Any=False , **UpperCAmelCase : Optional[int] , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ : List[Any] = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
lowerCamelCase__ : Union[str, Any] = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase , tokenizer_file=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , src_lang=UpperCAmelCase , tgt_lang=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , legacy_behaviour=UpperCAmelCase , **UpperCAmelCase , )
lowerCamelCase__ : List[Any] = vocab_file
lowerCamelCase__ : Dict = False if not self.vocab_file else True
lowerCamelCase__ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ : str = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ : int = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ : List[Any] = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A_ ( self : int ) -> str:
return self._src_lang
@src_lang.setter
def A_ ( self : List[Any] , UpperCAmelCase : str ) -> None:
lowerCamelCase__ : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A_ ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A_ ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
lowerCamelCase__ : Dict = [self.sep_token_id]
lowerCamelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A_ ( self : int , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Optional[str] , UpperCAmelCase : Optional[str] , **UpperCAmelCase : List[str] ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ : Optional[int] = src_lang
lowerCamelCase__ : Optional[int] = self(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = self.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = tgt_lang_id
return inputs
def A_ ( self : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : str = "eng_Latn" , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : str = "fra_Latn" , **UpperCAmelCase : Dict , ) -> BatchEncoding:
lowerCamelCase__ : Any = src_lang
lowerCamelCase__ : int = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Union[str, Any] ) -> Optional[int]:
return self.set_src_lang_special_tokens(self.src_lang )
def A_ ( self : Any ) -> Union[str, Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A_ ( self : str , UpperCAmelCase : Optional[Any] ) -> None:
lowerCamelCase__ : int = self.convert_tokens_to_ids(UpperCAmelCase )
if self.legacy_behaviour:
lowerCamelCase__ : int = []
lowerCamelCase__ : str = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ : int = [self.cur_lang_code]
lowerCamelCase__ : Tuple = [self.eos_token_id]
lowerCamelCase__ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A_ ( self : int , UpperCAmelCase : str ) -> None:
lowerCamelCase__ : Union[str, Any] = self.convert_tokens_to_ids(UpperCAmelCase )
if self.legacy_behaviour:
lowerCamelCase__ : Dict = []
lowerCamelCase__ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ : Any = [self.cur_lang_code]
lowerCamelCase__ : Optional[Any] = [self.eos_token_id]
lowerCamelCase__ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A_ ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
lowerCamelCase__ : int = os.path.join(
UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.vocab_file , UpperCAmelCase )
return (out_vocab_file,)
| 50 | 0 |
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 229 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()
        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        """Encode `prompt` (and an unconditional prompt, if needed) into CLIP text embeddings."""
_lowerCAmelCase = len(_lowercase ) if isinstance(_lowercase , _lowercase ) else 1
# get prompt text embeddings
_lowerCAmelCase = self.tokenizer(
_lowercase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
_lowerCAmelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowerCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
_lowerCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length]
_lowerCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
_lowerCAmelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_lowercase )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase = prompt_embeds.repeat_interleave(_lowercase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
_lowerCAmelCase = self.learned_classifier_free_sampling_embeddings.embeddings
_lowerCAmelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(_lowercase , 1 , 1 )
else:
_lowerCAmelCase = [""""""] * batch_size
_lowerCAmelCase = text_input_ids.shape[-1]
_lowerCAmelCase = self.tokenizer(
_lowercase , padding="""max_length""" , max_length=_lowercase , truncation=_lowercase , return_tensors="""pt""" , )
_lowerCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
_lowerCAmelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_lowercase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase = negative_prompt_embeds.shape[1]
_lowerCAmelCase = negative_prompt_embeds.repeat(1 , _lowercase , 1 )
_lowerCAmelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _lowercase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , _lowercase , _lowercase = 100 , _lowercase = 5.0 , _lowercase = 1.0 , _lowercase = 1 , _lowercase = None , _lowercase = None , _lowercase = "pil" , _lowercase = True , _lowercase = None , _lowercase = 1 , ):
"""simple docstring"""
if isinstance(_lowercase , _lowercase ):
_lowerCAmelCase = 1
elif isinstance(_lowercase , _lowercase ):
_lowerCAmelCase = len(_lowercase )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(_lowercase )}' )
_lowerCAmelCase = batch_size * num_images_per_prompt
_lowerCAmelCase = guidance_scale > 1.0
_lowerCAmelCase = self._encode_prompt(_lowercase , _lowercase , _lowercase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowercase , _lowercase ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(_lowercase )}.' )
# get the initial completely masked latents unless the user supplied it
_lowerCAmelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
_lowerCAmelCase = self.transformer.num_vector_embeds - 1
_lowerCAmelCase = torch.full(_lowercase , _lowercase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
_lowerCAmelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_lowercase , device=self.device )
_lowerCAmelCase = self.scheduler.timesteps.to(self.device )
_lowerCAmelCase = latents
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the sample if we are doing classifier free guidance
_lowerCAmelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
_lowerCAmelCase = self.transformer(_lowercase , encoder_hidden_states=_lowercase , timestep=_lowercase ).sample
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase = model_output.chunk(2 )
_lowerCAmelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(_lowercase , dim=1 , keepdim=_lowercase )
_lowerCAmelCase = self.truncate(_lowercase , _lowercase )
# remove `log(0)`'s (`-inf`s)
_lowerCAmelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase = self.scheduler.step(_lowercase , timestep=_lowercase , sample=_lowercase , generator=_lowercase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowercase , _lowercase , _lowercase )
_lowerCAmelCase = self.vqvae.config.vq_embed_dim
_lowerCAmelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
_lowerCAmelCase = self.vqvae.quantize.get_codebook_entry(_lowercase , shape=_lowercase )
_lowerCAmelCase = self.vqvae.decode(_lowercase , force_not_quantize=_lowercase ).sample
_lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (in log space) probabilities that fall below the `truncation_rate` cumulative-mass cutoff."""
_lowerCAmelCase , _lowerCAmelCase = torch.sort(_lowercase , 1 , descending=_lowercase )
_lowerCAmelCase = torch.exp(_lowercase )
_lowerCAmelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
_lowerCAmelCase = torch.full_like(keep_mask[:, 0:1, :] , _lowercase )
_lowerCAmelCase = torch.cat((all_true, keep_mask) , dim=1 )
_lowerCAmelCase = keep_mask[:, :-1, :]
_lowerCAmelCase = keep_mask.gather(1 , indices.argsort(1 ) )
_lowerCAmelCase = log_p_x_0.clone()
_lowerCAmelCase = -torch.inf # -inf = log(0)
return rv
| 229 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
UpperCAmelCase__ : str = KandinskyImgaImgPipeline
UpperCAmelCase__ : Optional[int] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
UpperCAmelCase__ : Union[str, Any] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
UpperCAmelCase__ : Union[str, Any] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
UpperCAmelCase__ : Any = False
@property
def lowercase_ ( self :Tuple ) -> Any:
'''simple docstring'''
return 32
@property
def lowercase_ ( self :Optional[int] ) -> str:
'''simple docstring'''
return 32
@property
def lowercase_ ( self :Optional[Any] ) -> str:
'''simple docstring'''
return self.time_input_dim
@property
def lowercase_ ( self :Optional[Any] ) -> Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowercase_ ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return 100
@property
def lowercase_ ( self :Tuple ) -> Tuple:
'''simple docstring'''
__A = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def lowercase_ ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__A = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
__A = MultilingualCLIP(_A )
__A = text_encoder.eval()
return text_encoder
@property
def lowercase_ ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__A = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__A = UNetaDConditionModel(**_A )
return model
@property
def lowercase_ ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self :Optional[int] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
__A = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self :List[str] ) -> str:
'''simple docstring'''
__A = self.dummy_text_encoder
__A = self.dummy_tokenizer
__A = self.dummy_unet
__A = self.dummy_movq
__A = {
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
__A = DDIMScheduler(**_A )
__A = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowercase_ ( self :Dict , _A :Union[str, Any] , _A :Optional[int]=0 ) -> str:
'''simple docstring'''
__A = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A )
__A = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A )
# create init_image
__A = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A )
__A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__A = Image.fromarray(np.uinta(_A ) ).convert('RGB' ).resize((256, 256) )
if str(_A ).startswith('mps' ):
__A = torch.manual_seed(_A )
else:
__A = torch.Generator(device=_A ).manual_seed(_A )
__A = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def lowercase_ ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
__A = 'cpu'
__A = self.get_dummy_components()
__A = self.pipeline_class(**_A )
__A = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A = pipe(**self.get_dummy_inputs(_A ) )
__A = output.images
__A = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
__A = image[0, -3:, -3:, -1]
__A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__A = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
def lowercase_ ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self :Dict ) -> Optional[int]:
'''simple docstring'''
__A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
__A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__A = 'A red cartoon frog, 4k'
__A = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_A )
__A = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
__A = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
__A = torch.Generator(device='cpu' ).manual_seed(0 )
__A , __A = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__A = pipeline(
_A , image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
__A = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
| 161 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict: dict) -> OrderedDict:
    """Translate original GLPN parameter names into the Hugging Face naming scheme."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict: dict, config: GLPNConfig) -> None:
    """Split each fused key/value matrix into separate key and value tensors."""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[: config.hidden_sizes[i], :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[config.hidden_sizes[i] :, :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img() -> Image.Image:
    """Download the standard COCO test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Copy/paste/tweak the original GLPN weights into the Hugging Face GLPN structure."""
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
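# Example invocation (my addition; the script filename and paths below are
# placeholders, not taken from the original):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_nyu.pth \
#       --pytorch_dump_folder_path ./glpn-nyu --push_to_hub --model_name glpn-nyu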
| 354 | import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
lowercase = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
lowercase = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
lowercase = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
    '''simple docstring'''
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    '''simple docstring'''
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    '''simple docstring'''
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, 'cosine')
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
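# Minimal illustration (my own toy data, not part of the metric): with identical
# English/Indic sentence vectors, every query retrieves its own index within the
# top 10, so precision@10 is 1.0.
def _demo_precision_at_10():
    vecs = np.eye(12)
    assert precision_at_10(vecs, vecs) == 1.0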
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    '''simple docstring'''
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(_a , _a )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 35 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
lowerCamelCase__ : str = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
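# --- Illustrative sketch (my addition, not transformers' actual _LazyModule) ---
# The import structure above defers heavy imports: attribute access on the
# module object triggers the real submodule import. A minimal version of the idea:
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._tiny_import_structure = import_structure

    def __getattr__(self, attr):
        # Resolve the attribute lazily from the submodule that declares it.
        for submodule, names in self._tiny_import_structure.items():
            if attr in names:
                module = importlib.import_module(f'.{submodule}', self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)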
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 225 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = 'pytorch_model.bin'
WEIGHTS_INDEX_NAME = 'pytorch_model.bin.index.json'
ADAPTER_CONFIG_NAME = 'adapter_config.json'
ADAPTER_WEIGHTS_NAME = 'adapter_model.bin'
ADAPTER_SAFE_WEIGHTS_NAME = 'adapter_model.safetensors'
TF2_WEIGHTS_NAME = 'tf_model.h5'
TF2_WEIGHTS_INDEX_NAME = 'tf_model.h5.index.json'
TF_WEIGHTS_NAME = 'model.ckpt'
FLAX_WEIGHTS_NAME = 'flax_model.msgpack'
FLAX_WEIGHTS_INDEX_NAME = 'flax_model.msgpack.index.json'
SAFE_WEIGHTS_NAME = 'model.safetensors'
SAFE_WEIGHTS_INDEX_NAME = 'model.safetensors.index.json'
CONFIG_NAME = 'config.json'
FEATURE_EXTRACTOR_NAME = 'preprocessor_config.json'
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = 'generation_config.json'
MODEL_CARD_NAME = 'modelcard.json'
SENTENCEPIECE_UNDERLINE = '▁'
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                'This example requires a source install from HuggingFace Transformers (see '
                '`https://huggingface.co/docs/transformers/installation#install-from-source`),'
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
            'versions of HuggingFace Transformers.') | 225 | 1 |
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    """simple docstring"""
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'tf_padding'))
        self.parent.assertTrue(hasattr(config, 'depth_multiplier'))


class MobileNetV1ModelTester:
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileNetV1 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileNetV1 does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileNetV1 does not output attentions')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224') if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
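# Follow-on sketch (my addition, not part of the original tests): turning the
# verified logits into a human-readable prediction at inference time.
def _predicted_label(model, outputs):
    predicted_class_idx = outputs.logits.argmax(-1).item()
    return model.config.id2label[predicted_class_idx]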
| 359 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """
    >>> factorial(5)
    120
    >>> factorial(0)
    1
    """
    if num < 0:
        raise ValueError('Number should not be negative.')
    return 1 if num in (0, 1) else num * factorial(num - 1)
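# For large inputs the recursive version above can exceed Python's default
# recursion limit; an iterative equivalent with the same contract (my addition):
def factorial_iterative(num: int) -> int:
    if num < 0:
        raise ValueError('Number should not be negative.')
    result = 1
    for i in range(2, num + 1):
        result *= i
    return result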
if __name__ == "__main__":
import doctest
doctest.testmod()
| 207 | 0 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """
    Returns the prime factors of ``n`` in ascending order.

    >>> prime_factors(45)
    [3, 3, 5]
    >>> prime_factors(2560)
    [2, 2, 2, 2, 2, 2, 2, 2, 2, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
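# Quick property check (my addition): the returned factors should multiply back
# to the original input for any n >= 2.
def _product_of_factors_equals_input(n: int) -> bool:
    product = 1
    for factor in prime_factors(n):
        product *= factor
    return product == n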
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    """simple docstring"""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    """simple docstring"""
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')


def test_offline_with_datasets_offline_mode_enabled():
    """simple docstring"""
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        # datasets raises an offline-mode error here, a ConnectionError subclass
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co')
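# Beyond these tests, the same context manager can guard any network-sensitive
# code path (sketch, my addition):
#
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       code_under_test()  # sees connection errors instead of real requests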
| 104 | 1 |
'''simple docstring'''
import copy
import re
class TrialShortNamer:
    """simple docstring"""
    PREFIX = 'hp'
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ''
                while integer != 0:
                    s = chr(ord('A') + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + '#' + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split('_')
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ['', '_']
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            'short_word': {},
            'reverse_short_word': {},
            'short_param': {},
            'reverse_short_param': {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO['short_param'][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = '' if isinstance(v, (int, float)) else '-'
            e = f"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1:]
        if repr == "":
            values = []
        else:
            values = repr.split('_')
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split('-')
            else:
                p_k = re.sub('[0-9.]', '', value)
                p_v = float(re.sub('[^0-9.]', '', value))
            key = cls.NAMING_INFO['reverse_short_param'][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
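# Worked example (my addition, not from the original module): shortname() and
# parse_repr() round-trip a hyperparameter dict.
def _demo_trial_short_namer():
    class DemoNamer(TrialShortNamer):
        PREFIX = 'hp'
        DEFAULTS = {'learning_rate': 1e-3, 'batch_size': 8}

    # batch_size keeps its default, so only the learning rate is encoded,
    # typically as 'hp_lr0.0001'
    name = DemoNamer.shortname({'learning_rate': 1e-4, 'batch_size': 8})
    params = DemoNamer.parse_repr(name)
    return name, params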
| 9 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    """simple docstring"""
    default_checkpoint = 'naver-clova-ix/donut-base-finetuned-docvqa'
    description = (
        'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    name = 'document_qa'
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ['image', 'text']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.')
        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}', question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors='pt').input_ids
        pixel_values = self.pre_processor(document, return_tensors='pt').pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs['pixel_values'].to(self.device), decoder_input_ids=inputs['decoder_input_ids'].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, '')
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, '')
        sequence = re.sub(r'<.*?>', '', sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 9 | 1 |
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """simple docstring"""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
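# Context (my reading of the loop above, cf. Project Euler 94): it sums the
# perimeters of "almost equilateral" Heronian triangles (sides a, a, a +/- 1
# with integral area); the prev_value/value recurrence walks the Pell-equation
# solutions that generate those triangles, and i's parity alternates the
# +2/-2 perimeter cases.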
if __name__ == "__main__":
print(f'''{solution() = }''')
| 28 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    '''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """simple docstring"""
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
        return {"f1": float(score) if score.size == 1 else score}
| 274 | 0 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('~'), '.cache')
CACHE_DIR = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _get_ckpt_path(model_type, use_small=False):
    '''simple docstring'''
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]['file_name'])


def _download(from_hf_hub, file_name):
    '''simple docstring'''
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_hub, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    '''simple docstring'''
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info['repo_id'], model_info['file_name'])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint['model_args']
    if "input_vocab_size" not in model_args:
        model_args['input_vocab_size'] = model_args['vocab_size']
        model_args['output_vocab_size'] = model_args['vocab_size']
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args['num_heads'] = model_args.pop('n_head')
    model_args['hidden_size'] = model_args.pop('n_embd')
    model_args['num_layers'] = model_args.pop('n_layer')
    model_config = ConfigClass(**checkpoint['model_args'])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint['model']
    # fixup checkpoint
    unwanted_prefix = '_orig_mod.'
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith('.attn.bias')}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith('.attn.bias')}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint['best_val_loss'].item()
    logger.info(f"model loaded: {round(n_params/1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase="text" ) -> Any:
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowerCAmelCase : Tuple = 'cpu' # do conversion on cpu
lowerCAmelCase : Optional[Any] = _get_ckpt_path(_UpperCAmelCase, use_small=_UpperCAmelCase )
lowerCAmelCase : Optional[Any] = _load_model(_UpperCAmelCase, _UpperCAmelCase, model_type=_UpperCAmelCase, use_small=_UpperCAmelCase )
# load bark initial model
lowerCAmelCase : Any = _bark_load_model(_UpperCAmelCase, 'cpu', model_type=_UpperCAmelCase, use_small=_UpperCAmelCase )
if model_type == "text":
lowerCAmelCase : str = bark_model['model']
if model.num_parameters(exclude_embeddings=_UpperCAmelCase ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
lowerCAmelCase : Optional[int] = 5
lowerCAmelCase : Tuple = 10
if model_type in ["text", "coarse"]:
lowerCAmelCase : str = torch.randint(256, (batch_size, sequence_length), dtype=torch.int )
lowerCAmelCase : Any = bark_model(_UpperCAmelCase )[0]
lowerCAmelCase : Tuple = model(_UpperCAmelCase )
# take last logits
lowerCAmelCase : Tuple = output_new_model_total.logits[:, [-1], :]
else:
lowerCAmelCase : Union[str, Any] = 3
lowerCAmelCase : Dict = 8
lowerCAmelCase : Dict = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int )
lowerCAmelCase : int = model(_UpperCAmelCase, _UpperCAmelCase )
lowerCAmelCase : str = bark_model(_UpperCAmelCase, _UpperCAmelCase )
lowerCAmelCase : Dict = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('initial and new outputs are not equal' )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path, ):
    '''simple docstring'''
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, 'config.json'))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, 'config.json'))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, 'config.json'))
    codecConfig = EncodecConfig.from_pretrained('facebook/encodec_24khz')
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained('facebook/encodec_24khz')
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig)
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config)
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
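# Example invocations (my addition; assumes this file is the Bark conversion
# script and the output paths are placeholders):
#   python convert_suno_to_hf.py text /tmp/bark-text --is_small
#   python convert_suno_to_hf.py coarse /tmp/bark-coarse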
| 323 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 323 | 1 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter(year) -> datetime:
    """Calculation of the Easter date for a given year, using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
        tense = 'will be' if year > datetime.now().year else 'was'
print(F"""Easter in {year} {tense} {gauss_easter(year)}""") | 97 |
'''simple docstring'''
def hamming_distance(string_a: str, string_b: str) -> int:
    """
    Number of positions at which two equal-length strings differ.

    >>> hamming_distance("python", "pithon")
    1
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string_a) != len(string_b):
        raise ValueError('String lengths must match!')
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 | 1 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {'time': time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['cpu'] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures


def end_measure(start_measures):
    # Time
    measures = {'time': time.time() - start_measures['time']}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['cpu'] = (psutil.Process().memory_info().rss - start_measures['cpu']) / 2**20
    measures['cpu-peak'] = (cpu_peak_tracker.stop() - start_measures['cpu']) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f'{i}-peak'] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures


def log_measures(measures, description):
    print(f'{description}:')
    print(f'- Time: {measures["time"]:.2f}s')
    for i in range(torch.cuda.device_count()):
        print(f'- GPU {i} allocated: {measures[str(i)]:.2f}MiB')
        peak = measures[f'{i}-peak']
        print(f'- GPU {i} peak: {peak:.2f}MiB')
    print(f'- CPU RAM allocated: {measures["cpu"]:.2f}MiB')
    print(f'- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB')
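# Usage sketch (my addition): bracket an arbitrary workload and report deltas.
def benchmark(workload, description='workload'):
    start_measures = start_measure()
    workload()
    measures = end_measure(start_measures)
    log_measures(measures, description)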
| 38 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location='cpu')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='cpu')['model']
    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 38 | 1 |
def solution() -> int:
    """
    Returns the product abc of the Pythagorean triplet for which a + b + c = 1000.

    >>> solution()
    31875000
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 90 | from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(graph: dict, v: str, visited_forward: set, visited_backward: set, cst_fwd: dict, cst_bwd: dict, queue: PriorityQueue, parent: dict, shortest_distance: float | int, ):
    '''simple docstring'''
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
graph_bwd = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
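
    # Example run on the module-level graphs above (illustrative addition):
    # the shortest E -> F distance is 3, via E -> G -> F.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))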
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for RemBERT."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True,
                 bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]",
                 pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None  # the SentencePiece processor is not picklable
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
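
# Usage sketch (illustrative addition; "sentencepiece.model" is a placeholder path):
#   tokenizer = RemBertTokenizer("sentencepiece.model")
#   tokenizer.build_inputs_with_special_tokens([5, 6, 7])  # -> [cls_id, 5, 6, 7, sep_id]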
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
snake_case_ : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
UpperCAmelCase__ = NewType('DataClass', Any)
UpperCAmelCase__ = NewType('DataClassType', Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
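
# Examples (illustrative addition): string_to_bool("yes") -> True, string_to_bool("0") -> False.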
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map a list of choices to an argparse `type` callable that converts the raw string back to the choice value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
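
# Usage sketch (illustrative addition; `MyArgs` is a hypothetical dataclass):
#   @dataclasses.dataclass
#   class MyArgs:
#       lr: float = HfArg(default=1e-3, help="learning rate", aliases=["--learning-rate"])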
class HfArgumentParser(ArgumentParser):
    """ArgumentParser subclass that builds its command-line arguments from dataclass fields."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
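
# Usage sketch (illustrative addition; `TrainingOpts` is a hypothetical dataclass):
#   @dataclasses.dataclass
#   class TrainingOpts:
#       lr: float = 1e-3
#       epochs: int = 1
#
#   (opts,) = HfArgumentParser(TrainingOpts).parse_args_into_dataclasses(["--lr", "0.01", "--epochs", "3"])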
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{'''sequence''': '''My name is grouped''', '''score''': 2.1e-05, '''token''': 38015, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1e-05, '''token''': 25506, '''token_str''': ''' accuser'''},
] , )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1e-05,
'''token''': 38015,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1e-05,
'''token''': 25506,
'''token_str''': ''' accuser''',
},
] , )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 13606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2e-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9e-05, '''token''': 2941, '''token_str''': ''' Te'''},
] , )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{'''sequence''': '''My name is Maul''', '''score''': 2.2e-05, '''token''': 35676, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2e-05, '''token''': 16416, '''token_str''': '''ELS'''},
] , )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2e-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2e-05, '''token''': 16416, '''token_str''': '''ELS'''},
] , )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1e-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2e-05, '''token''': 2941, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 13606, '''token_str''': ''' Clara'''},
] , )
        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
[
{
'''score''': 2.2e-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2e-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2e-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2e-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
# convert model to fp16
pipe.model.half()
        outputs = pipe("Paris is the [MASK] of France.")
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
        self.assertIsInstance(outputs, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs), [
{'''sequence''': '''My name is John''', '''score''': 0.0_0_8, '''token''': 610, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.0_0_7, '''token''': 1573, '''token_str''': ''' Chris'''},
] , )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs), [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.2_5_1,
'''token''': 2201,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.2_1_4,
'''token''': 12790,
'''token_str''': ''' Lyon''',
},
] , )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs), [
{'''sequence''': '''My name is Patrick''', '''score''': 0.0_0_5, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.0_0_0, '''token''': 13606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.0_0_0, '''token''': 2941, '''token_str''': ''' Te'''},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets) == set(tokens):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a word's sorted-letter signature; anagrams share a signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the word list that is an anagram of `my_word`."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
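
# Example (illustrative addition): signature("python") == "hnopty", and anagram("eat")
# would return e.g. ["ate", "eat", "tea"] when those words appear in words.txt.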
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
    printflock(f"{gpu} is broken")
    raise
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestClass(unittest.TestCase):
    """Test cases for the 0/1 knapsack implementation."""

    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_multiple_files(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
'''simple docstring'''
import math
def decimal_to_octal(num: int) -> str:
    """Convert a base-10 integer to its octal string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # The int() conversion removes the trailing '.0' that math.floor arithmetic leaves on `octal`.
    return f"0o{int(octal)}"
def main() -> None:
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 0o2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 0o10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 0o101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 0o330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 0o1000
    print("\n")
if __name__ == "__main__":
main()
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n        >>> from diffusers.utils import load_image\n        >>> import torch\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior.to("cuda")\n\n        >>> prompt = "A red cartoon frog, 4k"\n        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n        ... )\n        >>> pipe.to("cuda")\n\n        >>> init_image = load_image(\n        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n        ...     "/kandinsky/frog.png"\n        ... )\n\n        >>> image = pipe(\n        ...     image=init_image,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ...     strength=0.2,\n        ... ).images\n\n        >>> image[0].save("red_frog.png")\n        ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
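
# Example (illustrative addition): downscale_height_and_width(768, 768, scale_factor=8) == (96, 96).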
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
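
# Note (illustrative addition): prepare_image returns a (1, 3, h, w) float tensor scaled to [-1, 1].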
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Image-to-image generation pipeline for Kandinsky 2.2."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase_ )
def __call__( self : Optional[Any] , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 1_0_0 , lowerCAmelCase_ : float = 4.0 , lowerCAmelCase_ : float = 0.3 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , ):
"""simple docstring"""
_A: Any = self._execution_device
_A: Any = guidance_scale > 1.0
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: Any = torch.cat(lowerCAmelCase_ , dim=0 )
_A: int = image_embeds.shape[0]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: Dict = torch.cat(lowerCAmelCase_ , dim=0 )
if do_classifier_free_guidance:
_A: Any = image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0 )
_A: str = negative_image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0 )
_A: Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCAmelCase_ )
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: List[str] = [image]
if not all(isinstance(lowerCAmelCase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(lowerCAmelCase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
_A: List[str] = torch.cat([prepare_image(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for i in image] , dim=0 )
_A: Tuple = image.to(dtype=image_embeds.dtype , device=lowerCAmelCase_ )
_A: Optional[Any] = self.movq.encode(lowerCAmelCase_ )['''latents''']
_A: Optional[int] = latents.repeat_interleave(lowerCAmelCase_ , dim=0 )
self.scheduler.set_timesteps(lowerCAmelCase_ , device=lowerCAmelCase_ )
_A , _A: List[Any] = self.get_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A: Dict = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
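                # Standard classifier-free guidance combination: uncond + scale * (text - uncond),
                # with the learned-variance channels carried through unchanged.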
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
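# Usage sketch (assumption: this is the Kandinsky 2.2 image-to-image decoder pipeline;
# image_embeds/negative_image_embeds would come from the matching prior pipeline):
#   out = pipe(image_embeds=emb, image=init_image, negative_image_embeds=neg_emb,
#              strength=0.3, guidance_scale=4.0)
#   out.images[0].save("result.png")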
| 121 | 0 |
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    Recursive bubble sort: each pass floats the largest remaining element to the end.

    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    >>> bubble_sort([-2, -45, -5])
    [-45, -5, -2]
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # Recurse on the shortened unsorted prefix only if a swap occurred this pass.
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 153 |
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_multiple_size=4, hidden_act="gelu", hidden_dropout=0.0, attention_dropout=0.1, weight_tying=True, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
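# The test class below plugs this tester into the shared ModelTesterMixin/PipelineTesterMixin
# suites, which call the prepare_* and create_and_check_* helpers defined above.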
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS) | 153 | 1
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
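# Invoked via the `accelerate test` CLI entry point (or run directly); it launches the bundled
# end-to-end test script through accelerate-launch, using --config_file when one is given.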
| 322 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)
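    # TextIteratorStreamer variant: generation runs in a background thread while the main
    # thread consumes decoded text chunks from the streamer as they become available.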
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
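# Minimal usage sketch (assumes `model` and `tokenizer` are already loaded):
#   streamer = TextStreamer(tokenizer)
#   model.generate(input_ids, max_new_tokens=10, streamer=streamer)  # prints text as it is generated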
| 72 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict=False ):
'''simple docstring'''
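    # Build (timm_key, hf_key) rename pairs mapping timm parameter names onto the HF ViT-hybrid layout.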
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
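# timm fuses Q, K and V into a single qkv projection; the function below splits that matrix
# back into the three separate projections the HF checkpoint layout expects.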
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # standard COCO image used across HF conversion scripts for sanity checks
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the timm model's weights into our ViT-hybrid structure.
    """
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # map ImageNet-1k class ids to human-readable labels for the classification head
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create an image processor that reproduces the timm eval transforms
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
UpperCAmelCase_ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
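# Example invocation (hypothetical output path):
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid --push_to_hub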
| 61 |
'''simple docstring'''
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest
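# Note: products are taken over runs of four cells, so grids smaller than 4x4 simply return 0.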
def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
| 61 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
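# Example (illustrative values; the coefficient matrix must be strictly diagonally dominant):
#   coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
#   constant = np.array([[2.0], [-6.0], [-4.0]])
#   jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3)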
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
| 16 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
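# `replicate` copies the parameter pytree to every device; `shard` splits a batch along its
# leading axis so each device receives one slice under pmap/jit execution.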
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 86 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) tokenizer for HerBERT."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
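# Usage sketch (downloads the tokenizer files on first call):
#   tok = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#   tok("Kot siedzi na macie.").input_ids  # wrapped as <s> ... </s>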
| 86 | 1 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the chosen validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by"
            " adding to this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
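# The callback below logs learning rates per optimizer group, writes per-split result and
# generation files, and reports parameter counts; only rank zero writes, to avoid duplicate
# files in distributed (DDP) runs.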
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 55 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Base case: direct multiplication of two 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # Strassen's seven recursive products instead of the naive eight
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
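
    # Hedged sanity-check sketch (not in the original file). Note that `strassen`
    # pads its inputs in place, so fresh copies are multiplied here and compared
    # against a naive triple-loop product.
    def naive_multiply(a: list, b: list) -> list:
        return [
            [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
            for i in range(len(a))
        ]

    check_a = [[1, 2, 3], [4, 5, 6]]
    check_b = [[7, 8], [9, 10], [11, 12]]
    assert strassen([row[:] for row in check_a], [row[:] for row in check_b]) == naive_multiply(check_a, check_b)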
| 124 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_missing(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
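
# Hedged convenience addition (not in the original file): lets the module be run
# directly with `python <this_file>.py` instead of only through pytest.
if __name__ == "__main__":
    unittest.main()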
| 38 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1_024, 2_048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
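

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module; assumes the package-relative
    # imports above resolve, e.g. when run from inside the transformers package).
    config = ResNetConfig(depths=[2, 2, 2, 2], layer_type="basic", out_features=["stage4"])
    onnx_config = ResNetOnnxConfig(config)
    print(config.stage_names)        # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
    print(dict(onnx_config.inputs))  # dynamic batch/num_channels/height/width axes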
| 38 | 1 |
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
    print(f"{solution() = }")
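
    # Hedged cross-check sketch (not in the original file): a memoized recursion that
    # counts rows containing at least one tile of a single fixed length; summing it
    # over the three tile lengths must agree with the DP table built by `solution`.
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def ways(row_length: int, tile_length: int) -> int:
        if row_length < tile_length:
            return 0
        return sum(
            1 + ways(row_length - start - tile_length, tile_length)
            for start in range(row_length - tile_length + 1)
        )

    assert solution(10) == ways(10, 2) + ways(10, 3) + ways(10, 4)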
| 325 |
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 91 | 0 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = T5LayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )
        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask
            )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5-style layer norm: only scales, no mean subtraction and no bias.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
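

if __name__ == "__main__":
    # Hedged smoke-test sketch (not part of the original module; assumes diffusers'
    # package-relative imports above resolve). Checks the decoder's output shape.
    decoder = T5FilmDecoder(
        input_dims=8, targets_length=16, d_model=32, num_layers=1, num_heads=2, d_kv=16, d_ff=64
    )
    tokens = torch.randn(2, 16, 8)          # (batch, target_length, input_dims)
    noise_time = torch.rand(2)              # one noise level per batch element
    encodings = [(torch.randn(2, 4, 32), torch.ones(2, 4))]  # (encoding, mask) pairs
    print(decoder(encodings, tokens, noise_time).shape)  # expected: torch.Size([2, 16, 8])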
| 204 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __snake_case :
def __init__( self : Any , _lowercase : Tuple , _lowercase : str=2 , _lowercase : List[Any]=3 , _lowercase : Optional[Any]=4 , _lowercase : Optional[Any]=2 , _lowercase : str=7 , _lowercase : Dict=True , _lowercase : List[str]=True , _lowercase : Union[str, Any]=True , _lowercase : Optional[int]=True , _lowercase : Dict=99 , _lowercase : Dict=36 , _lowercase : Tuple=2 , _lowercase : Optional[int]=4 , _lowercase : int=37 , _lowercase : Tuple="gelu" , _lowercase : Optional[Any]=0.1 , _lowercase : Tuple=0.1 , _lowercase : str=5_12 , _lowercase : Dict=16 , _lowercase : int=2 , _lowercase : int=0.02 , _lowercase : Any=6 , _lowercase : List[Any]=6 , _lowercase : List[Any]=3 , _lowercase : List[Any]=4 , _lowercase : int=None , _lowercase : Optional[int]=10_00 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_mask
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = coordinate_size
SCREAMING_SNAKE_CASE__ = shape_size
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = scope
SCREAMING_SNAKE_CASE__ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE__ = text_seq_length
SCREAMING_SNAKE_CASE__ = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE__ = self.text_seq_length + self.image_seq_length
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
SCREAMING_SNAKE_CASE__ = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE__ = bbox[i, j, 3]
SCREAMING_SNAKE_CASE__ = bbox[i, j, 1]
SCREAMING_SNAKE_CASE__ = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE__ = bbox[i, j, 2]
SCREAMING_SNAKE_CASE__ = bbox[i, j, 0]
SCREAMING_SNAKE_CASE__ = tmp_coordinate
SCREAMING_SNAKE_CASE__ = tf.constant(_lowercase )
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE__ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __a ( self : List[str] , _lowercase : Dict , _lowercase : List[Any] , _lowercase : str , _lowercase : Optional[int] , _lowercase : Union[str, Any] , _lowercase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = TFLayoutLMvaModel(config=_lowercase )
# text + image
SCREAMING_SNAKE_CASE__ = model(_lowercase , pixel_values=_lowercase , training=_lowercase )
SCREAMING_SNAKE_CASE__ = model(
_lowercase , bbox=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , training=_lowercase , )
SCREAMING_SNAKE_CASE__ = model(_lowercase , bbox=_lowercase , pixel_values=_lowercase , training=_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE__ = model(_lowercase , training=_lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE__ = model({"""pixel_values""": pixel_values} , training=_lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __a ( self : int , _lowercase : int , _lowercase : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = TFLayoutLMvaForSequenceClassification(config=_lowercase )
SCREAMING_SNAKE_CASE__ = model(
_lowercase , bbox=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , training=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : Any , _lowercase : Dict , _lowercase : Tuple , _lowercase : int , _lowercase : Optional[int] , _lowercase : Optional[int] , _lowercase : Union[str, Any] , _lowercase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = TFLayoutLMvaForTokenClassification(config=_lowercase )
SCREAMING_SNAKE_CASE__ = model(
_lowercase , bbox=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , training=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __a ( self : str , _lowercase : int , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = TFLayoutLMvaForQuestionAnswering(config=_lowercase )
SCREAMING_SNAKE_CASE__ = model(
_lowercase , bbox=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , training=_lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class __snake_case ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def __a ( self : Union[str, Any] , _lowercase : Optional[int] , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : List[Any] ):
"""simple docstring"""
return True
def __a ( self : List[str] , _lowercase : List[Any] , _lowercase : str , _lowercase : str=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = copy.deepcopy(_lowercase )
if model_class in get_values(_lowercase ):
SCREAMING_SNAKE_CASE__ = {
k: tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_lowercase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_lowercase ):
SCREAMING_SNAKE_CASE__ = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_lowercase ):
SCREAMING_SNAKE_CASE__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
SCREAMING_SNAKE_CASE__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_lowercase ):
SCREAMING_SNAKE_CASE__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_lowercase ):
SCREAMING_SNAKE_CASE__ = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_lowercase )
if getattr(_lowercase , """hf_compute_loss""" , _lowercase ):
# The number of elements in the loss should be the same as the number of elements in the label
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(inputs_dict.copy() , _lowercase , return_labels=_lowercase )
SCREAMING_SNAKE_CASE__ = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_lowercase )[0]
]
SCREAMING_SNAKE_CASE__ = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(inputs_dict.copy() , _lowercase , return_labels=_lowercase )
SCREAMING_SNAKE_CASE__ = prepared_for_class.pop("""input_ids""" )
SCREAMING_SNAKE_CASE__ = model(_lowercase , **_lowercase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(inputs_dict.copy() , _lowercase , return_labels=_lowercase )
SCREAMING_SNAKE_CASE__ = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
SCREAMING_SNAKE_CASE__ = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
SCREAMING_SNAKE_CASE__ = -1_00
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(_lowercase )
SCREAMING_SNAKE_CASE__ = model(_lowercase , **_lowercase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(inputs_dict.copy() , _lowercase , return_labels=_lowercase )
SCREAMING_SNAKE_CASE__ = model(_lowercase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(inputs_dict.copy() , _lowercase , return_labels=_lowercase )
# Get keys that were added with the _prepare_for_class function
SCREAMING_SNAKE_CASE__ = prepared_for_class.keys() - inputs_dict.keys()
SCREAMING_SNAKE_CASE__ = inspect.signature(model.call ).parameters
SCREAMING_SNAKE_CASE__ = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
SCREAMING_SNAKE_CASE__ = {0: """input_ids"""}
for label_key in label_keys:
SCREAMING_SNAKE_CASE__ = signature_names.index(_lowercase )
SCREAMING_SNAKE_CASE__ = label_key
SCREAMING_SNAKE_CASE__ = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
SCREAMING_SNAKE_CASE__ = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
SCREAMING_SNAKE_CASE__ = prepared_for_class[value]
SCREAMING_SNAKE_CASE__ = tuple(_lowercase )
# Send to model
SCREAMING_SNAKE_CASE__ = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class __snake_case ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 204 | 1 |
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}


class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)
        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
@property
def snake_case__ ( self ) -> Optional[int]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def snake_case__ ( self ) -> Tuple:
return dict(self.artists_encoder ,self.genres_encoder ,self.lyrics_encoder )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
A__ = [self.artists_encoder.get(__UpperCAmelCase ,0 ) for artist in list_artists]
for genres in range(len(__UpperCAmelCase ) ):
A__ = [self.genres_encoder.get(__UpperCAmelCase ,0 ) for genre in list_genres[genres]]
A__ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
A__ = [[self.lyrics_encoder.get(__UpperCAmelCase ,0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def snake_case__ ( self ,__UpperCAmelCase ) -> List[str]:
return list(__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
A__ , A__ , A__ = self.prepare_for_tokenization(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
A__ = self._tokenize(__UpperCAmelCase )
return artist, genre, lyrics
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
A__ = artists[idx].lower()
A__ = [genres[idx].lower()]
else:
A__ = self._normalize(artists[idx] ) + '.v2'
A__ = [
self._normalize(__UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
A__ = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
A__ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
A__ = {vocab[index]: index + 1 for index in range(len(__UpperCAmelCase ) )}
A__ = 0
A__ = len(__UpperCAmelCase ) + 1
A__ = self.vocab
A__ = {v: k for k, v in self.vocab.items()}
A__ = ''
else:
A__ = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
A__ = self._run_strip_accents(__UpperCAmelCase )
A__ = lyrics.replace('\\' ,'\n' )
A__ = self.out_of_vocab.sub('' ,__UpperCAmelCase ), [], []
return artists, genres, lyrics
def snake_case__ ( self ,__UpperCAmelCase ) -> str:
A__ = unicodedata.normalize('NFD' ,__UpperCAmelCase )
A__ = []
for char in text:
A__ = unicodedata.category(__UpperCAmelCase )
if cat == "Mn":
continue
output.append(__UpperCAmelCase )
return "".join(__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ) -> str:
A__ = (
[chr(__UpperCAmelCase ) for i in range(ord('a' ) ,ord('z' ) + 1 )]
+ [chr(__UpperCAmelCase ) for i in range(ord('A' ) ,ord('Z' ) + 1 )]
+ [chr(__UpperCAmelCase ) for i in range(ord('0' ) ,ord('9' ) + 1 )]
+ ['.']
)
A__ = frozenset(__UpperCAmelCase )
A__ = re.compile(R'_+' )
A__ = ''.join([c if c in accepted else '_' for c in text.lower()] )
A__ = pattern.sub('_' ,__UpperCAmelCase ).strip('_' )
return text
def snake_case__ ( self ,__UpperCAmelCase ) -> str:
return " ".join(__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = False ) -> Tuple:
# Convert to TensorType
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
A__ = TensorType(__UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
A__ = tf.constant
A__ = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
A__ = torch.tensor
A__ = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
A__ = jnp.array
A__ = _is_jax
else:
A__ = np.asarray
A__ = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
A__ = [inputs]
if not is_tensor(__UpperCAmelCase ):
A__ = as_tensor(__UpperCAmelCase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase="" ,__UpperCAmelCase="pt" ) -> BatchEncoding:
A__ = [0, 0, 0]
A__ = [artist] * len(self.version )
A__ = [genres] * len(self.version )
A__ , A__ , A__ = self.tokenize(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
A__ , A__ , A__ = self._convert_token_to_id(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
A__ = [-INFINITY] * len(full_tokens[-1] )
A__ = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] ,tensor_type=__UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A__ = os.path.join(
__UpperCAmelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(__UpperCAmelCase ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder ,ensure_ascii=__UpperCAmelCase ) )
A__ = os.path.join(
__UpperCAmelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(__UpperCAmelCase ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder ,ensure_ascii=__UpperCAmelCase ) )
A__ = os.path.join(
__UpperCAmelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(__UpperCAmelCase ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder ,ensure_ascii=__UpperCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
A__ = self.artists_decoder.get(__UpperCAmelCase )
A__ = [self.genres_decoder.get(__UpperCAmelCase ) for genre in genres_index]
A__ = [self.lyrics_decoder.get(__UpperCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
| 221 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["ChineseCLIPFeatureExtractor"]
__lowerCamelCase = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 221 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=51_865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1_536,
        encoder_ffn_dim=1_536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50_257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1_500,
        max_target_positions=448,
        pad_token_id=50_256,
        bos_token_id=50_256,
        eos_token_id=50_256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50_256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22_050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
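

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module; assumes the package-relative
    # imports above resolve). Demonstrates the `attribute_map` aliases.
    config = WhisperConfig(d_model=384, encoder_attention_heads=6)
    print(config.hidden_size)          # 384, aliased to d_model
    print(config.num_attention_heads)  # 6, aliased to encoder_attention_heads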
| 356 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts)
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config)
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights)
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="""Training""")
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""")
parser.add_argument(
"""--dump_path""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The output directory (log, checkpoints, parameters, etc.)""")
parser.add_argument(
"""--data_file""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=SCREAMING_SNAKE_CASE__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""Path to the student configuration.""")
parser.add_argument(
"""--student_pretrained_weights""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Load student initialization checkpoint.""")
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""Teacher type (BERT, RoBERTa).""")
parser.add_argument("""--teacher_name""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The teacher model.""")
parser.add_argument("""--temperature""" , default=2.0 , type=SCREAMING_SNAKE_CASE__ , help="""Temperature for the softmax temperature.""")
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the distillation loss. Must be >=0.""")
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the CLM loss. Must be >=0.""")
parser.add_argument("""--alpha_mse""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the MSE loss. Must be >=0.""")
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""")
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""")
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to mask out.""")
parser.add_argument("""--word_keep""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to keep.""")
parser.add_argument("""--word_rand""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to randomly replace.""")
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=SCREAMING_SNAKE_CASE__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=SCREAMING_SNAKE_CASE__ , help="""The token counts in the data_file for MLM.""")
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=SCREAMING_SNAKE_CASE__ , default=3 , help="""Number of pass on the whole dataset.""")
parser.add_argument("""--batch_size""" , type=SCREAMING_SNAKE_CASE__ , default=5 , help="""Batch size (for each process).""")
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=SCREAMING_SNAKE_CASE__ , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=SCREAMING_SNAKE_CASE__ , help="""Linear warmup proportion.""")
parser.add_argument("""--weight_decay""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Weight decay if we apply some.""")
parser.add_argument("""--learning_rate""" , default=5e-4 , type=SCREAMING_SNAKE_CASE__ , help="""The initial learning rate for Adam.""")
parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=SCREAMING_SNAKE_CASE__ , help="""Epsilon for Adam optimizer.""")
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=SCREAMING_SNAKE_CASE__ , help="""Max gradient norm.""")
parser.add_argument("""--initializer_range""" , default=0.02 , type=SCREAMING_SNAKE_CASE__ , help="""Random initialization range.""")
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=SCREAMING_SNAKE_CASE__ , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="""Number of GPUs in the node.""")
parser.add_argument("""--local_rank""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="""Distributed training - Local rank""")
parser.add_argument("""--seed""" , type=SCREAMING_SNAKE_CASE__ , default=56 , help="""Random seed""")
parser.add_argument("""--log_interval""" , type=SCREAMING_SNAKE_CASE__ , default=500 , help="""Tensorboard logging interval.""")
parser.add_argument("""--checkpoint_interval""" , type=SCREAMING_SNAKE_CASE__ , default=4000 , help="""Checkpoint interval.""")
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path):
if not args.force:
                raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    """ it. Use `--force` if you want to overwrite it.""")
else:
shutil.rmtree(args.dump_path)
if not os.path.exists(args.dump_path):
os.makedirs(args.dump_path)
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''')
# SAVE PARAMS #
logger.info(F'''Param: {args}''')
with open(os.path.join(args.dump_path , """parameters.json""") , """w""") as f:
            json.dump(vars(args) , f , indent=4)
git_log(args.dump_path)
    student_config_class, student_model_class, student_tokenizer_class = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(F'''Special tokens {special_tok_ids}''')
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file , """rb""") as fp:
        data = pickle.load(fp)
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''')
with open(args.token_counts , """rb""") as fp:
            counts = pickle.load(fp)
        token_probs = np.maximum(counts , 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args , data=data)
logger.info("""Data loader created.""")
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''')
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''')
        student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''')
logger.info("""Student loaded.""")
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True)
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''')
logger.info(F'''Teacher loaded from {args.teacher_name}.''')
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher)
distiller.train()
logger.info("""Let's go get some drinks.""")
if __name__ == "__main__":
main()
| 293 | 0 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
_SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__)
class MaskedBertConfig( PretrainedConfig ):
    model_type = 'masked_bert'
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs , ) -> Dict:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
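# Minimal usage sketch (hypothetical; upstream this is `MaskedBertConfig` from
# the movement-pruning research project):
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)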
| 85 | """simple docstring"""
from math import pow
def backtrack(needed_sum, power, current_number, current_sum, solutions_count):
    """simple docstring"""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
    return current_sum, solutions_count
def solve(needed_sum, power):
    """simple docstring"""
    if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10):
        raise ValueError(
            """Invalid input\n"""
            """needed_sum must be between 1 and 1000, power between 2 and 10.""" )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
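# Minimal check (assuming the restored names above): 100 has exactly three
# representations as a sum of distinct squares:
#   10^2, 6^2 + 8^2, and 1^2 + 3^2 + 4^2 + 5^2 + 7^2
# print(solve(100, 2))  # -> 3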
if __name__ == "__main__":
import doctest
doctest.testmod()
| 289 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ['''ConvNextFeatureExtractor''']
    _import_structure["image_processing_convnext"] = ['''ConvNextImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
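    # Note: `_LazyModule` registers the names above but defers the heavy
    # torch/TF/vision imports until a symbol is first accessed, which keeps
    # `import transformers` cheap when ConvNext is never used.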
| 299 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCamelCase__ = logging.get_logger(__name__)
enable_full_determinism()
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 4
UpperCAmelCase__ : str = 3
UpperCAmelCase__ : str = (32, 32)
UpperCAmelCase__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Tuple = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return (3, 32, 32)
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
UpperCAmelCase__ : Tuple = self.dummy_input
return init_dict, inputs_dict
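    # (Sketch of how the common tester consumes these dicts — not part of the
    # upstream file: `model = self.model_class(**init_dict)` followed by
    # `model(**inputs_dict).sample`.)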
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = 4
UpperCAmelCase__ : Dict = 4
UpperCAmelCase__ : List[str] = (32, 32)
UpperCAmelCase__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : List[Any] = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return (4, 32, 32)
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return (4, 32, 32)
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
UpperCAmelCase__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model.to(_A )
UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model_accelerate.to(_A )
model_accelerate.eval()
UpperCAmelCase__ : Tuple = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase__ : Union[str, Any] = noise.to(_A )
UpperCAmelCase__ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(_A )
UpperCAmelCase__ : Any = model_accelerate(_A , _A )['''sample''']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
UpperCAmelCase__ , UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
model_normal_load.to(_A )
model_normal_load.eval()
UpperCAmelCase__ : Optional[int] = model_normal_load(_A , _A )['''sample''']
assert torch_all_close(_A , _A , rtol=1e-3 )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(_A )
UpperCAmelCase__ : Union[str, Any] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase__ : str = noise.to(_A )
UpperCAmelCase__ : str = torch.tensor([10] * noise.shape[0] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample
UpperCAmelCase__ : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Tuple = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-3 ) )
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Any , _A : str=(32, 32) ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = 4
UpperCAmelCase__ : List[str] = 3
UpperCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return (3, 32, 32)
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1e-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
UpperCAmelCase__ : Tuple = self.dummy_input
return init_dict, inputs_dict
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
UpperCAmelCase__ : List[str] = self.dummy_input
UpperCAmelCase__ : Dict = floats_tensor((4, 3) + (256, 256) ).to(_A )
UpperCAmelCase__ : Optional[Any] = noise
UpperCAmelCase__ : Any = model(**_A )
assert image is not None, "Make sure output is not None"
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(_A )
UpperCAmelCase__ : Optional[Any] = 4
UpperCAmelCase__ : List[str] = 3
UpperCAmelCase__ : Dict = (256, 256)
UpperCAmelCase__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample
UpperCAmelCase__ : Any = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Tuple = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(_A )
UpperCAmelCase__ : str = 4
UpperCAmelCase__ : Any = 3
UpperCAmelCase__ : int = (32, 32)
UpperCAmelCase__ : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch_size * [1e-4] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : int = model(_A , _A ).sample
UpperCAmelCase__ : Dict = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Any = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
pass
| 299 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :Dict = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config( PretrainedConfig ):
    model_type = """mobilenet_v2"""
    def __init__( self , num_channels=3 , image_size=2_2_4 , depth_multiplier=1.0 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=3_2 , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.8 , initializer_range=0.0_2 , layer_norm_eps=0.0_0_1 , semantic_loss_ignore_index=2_5_5 , **kwargs , ):
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self : Optional[int] ):
        return OrderedDict([("pixel_values", {0: "batch"})] )
    @property
    def outputs( self : Union[str, Any] ):
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
    @property
    def atol_for_validation( self : List[Any] ):
        return 1e-4
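# Minimal usage sketch (class names restored above follow the upstream
# transformers API for the google/mobilenet_v2 checkpoints):
#   config = MobileNetV2Config(depth_multiplier=0.75, image_size=160)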
| 22 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :int = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig( PretrainedConfig ):
    model_type = """perceiver"""
    def __init__( self , num_latents=2_5_6 , d_latents=1_2_8_0 , d_model=7_6_8 , num_blocks=1 , num_self_attends_per_block=2_6 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , use_query_residual=True , vocab_size=2_6_2 , max_position_embeddings=2_0_4_8 , image_size=5_6 , train_size=[3_6_8, 4_9_6] , num_frames=1_6 , audio_samples_per_frame=1_9_2_0 , samples_per_patch=1_6 , output_shape=[1, 1_6, 2_2_4, 2_2_4] , **kwargs , ):
        super().__init__(**kwargs )
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig( OnnxConfig ):
    @property
    def inputs( self : int ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
    @property
    def atol_for_validation( self : Optional[Any] ):
        return 1e-4
    def generate_dummy_inputs( self , preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size : int = -1 , seq_length : int = -1 , num_choices : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , num_channels : int = 3 , image_width : int = 4_0 , image_height : int = 4_0 , ):
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor , PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("input_ids" )
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("pixel_values" )
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 22 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 364 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "realm"
    def __init__( self , vocab_size=30522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1E-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13353718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> Dict:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 117 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
UpperCAmelCase_ : Any = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE_ ( __A : torch.nn.Module , __A : BnbQuantizationConfig , __A : Union[str, os.PathLike] = None , __A : Optional[Dict[str, Union[int, str, torch.device]]] = None , __A : Optional[List[str]] = None , __A : Optional[Dict[Union[int, str], Union[int, str]]] = None , __A : Optional[Union[str, os.PathLike]] = None , __A : bool = False , ) -> Dict:
"""simple docstring"""
a_ : Optional[int] = bnb_quantization_config.load_in_abit
a_ : Any = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
a_ : List[Any] = []
# custom device map
if isinstance(__A , __A ) and len(device_map.keys() ) > 1:
a_ : Optional[int] = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
a_ : List[str] = get_keys_to_not_convert(__A )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__A )
a_ : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
a_ : List[Any] = []
a_ : str = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__A )
# compatibility with peft
a_ : Any = load_in_abit
a_ : List[Any] = load_in_abit
a_ : List[Any] = get_parameter_device(__A )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
a_ : Any = replace_with_bnb_layers(__A , __A , modules_to_not_convert=__A )
# convert param to the right dtype
a_ : Union[str, Any] = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
a_ : int = name.replace('.weight' , '' ).replace('.bias' , '' )
a_ : int = getattr(__A , __A , __A )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__A ):
param.to(__A )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info(
            F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            ' We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
a_ : List[Any] = replace_with_bnb_layers(
__A , __A , modules_to_not_convert=__A )
a_ : Any = get_quantized_model_device_map(
__A , __A , __A , max_memory=__A , no_split_module_classes=__A , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
a_ : Dict = True
a_ : Any = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
__A , __A , __A , dtype=bnb_quantization_config.torch_dtype , offload_folder=__A , offload_state_dict=__A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(__A , device_map=__A , offload_dir=__A )
def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : Dict , __A : List[Any]=None , __A : Union[str, Any]=None , __A : Optional[Any]=None ) -> str:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
a_ : Dict = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(__A , __A ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
a_ : Any = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
a_ : Optional[Any] = {}
a_ : Union[str, Any] = special_dtypes
a_ : Optional[int] = no_split_module_classes
a_ : str = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
a_ : Optional[Any] = get_balanced_memory(
__A , low_zero=(device_map == 'balanced_low_0') , max_memory=__A , **__A , )
a_ : int = max_memory
a_ : Optional[int] = infer_auto_device_map(__A , **__A )
if isinstance(__A , __A ):
# check if don't have any quantized module on the cpu
a_ : str = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
a_ : Optional[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
                logger.info(
                    'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : List[str] , __A : List[Any]=None , __A : List[Any]=None ) -> Any:
"""simple docstring"""
if modules_to_not_convert is None:
a_ : Union[str, Any] = []
a_ , a_ : List[str] = _replace_with_bnb_layers(
__A , __A , __A , __A )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def SCREAMING_SNAKE_CASE_ ( __A : int , __A : int , __A : Tuple=None , __A : Any=None , ) -> Any:
"""simple docstring"""
a_ : int = False
for name, module in model.named_children():
if current_key_name is None:
a_ : List[str] = []
current_key_name.append(__A )
if isinstance(__A , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
a_ : Union[str, Any] = '.'.join(__A )
a_ : Any = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
a_ : int = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
a_ : Union[str, Any] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__A , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
a_ : Optional[int] = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
a_ : Union[str, Any] = module.weight.data
if module.bias is not None:
a_ : str = module.bias.data
bnb_module.requires_grad_(__A )
setattr(__A , __A , __A )
a_ : int = True
if len(list(module.children() ) ) > 0:
a_ , a_ : List[str] = _replace_with_bnb_layers(
__A , __A , __A , __A )
a_ : List[str] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def SCREAMING_SNAKE_CASE_ ( __A : Tuple ) -> Optional[int]:
"""simple docstring"""
with init_empty_weights():
a_ : List[str] = deepcopy(__A ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
a_ : List[str] = find_tied_parameters(__A )
# For compatibility with Accelerate < 0.18
if isinstance(__A , __A ):
a_ : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
a_ : Any = sum(__A , [] )
a_ : Optional[int] = len(__A ) > 0
# Check if it is a base model
a_ : Union[str, Any] = False
if hasattr(__A , 'base_model_prefix' ):
a_ : Union[str, Any] = not hasattr(__A , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
a_ : Tuple = list(model.named_children() )
a_ : str = [list_modules[-1][0]]
# add last module together with tied weights
a_ : List[str] = set(__A ) - set(__A )
a_ : Dict = list(set(__A ) ) + list(__A )
# remove ".weight" from the keys
a_ : List[str] = ['.weight', '.bias']
a_ : List[Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
a_ : Dict = name.replace(__A , '' )
filtered_module_names.append(__A )
return filtered_module_names
def SCREAMING_SNAKE_CASE_ ( __A : Any ) -> Any:
"""simple docstring"""
for m in model.modules():
if isinstance(__A , bnb.nn.Linearabit ):
return True
return False
def SCREAMING_SNAKE_CASE_ ( __A : nn.Module ) -> Union[str, Any]:
"""simple docstring"""
return next(parameter.parameters() ).device
def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : int , __A : Optional[Any] , __A : List[str] , __A : Dict , __A : Tuple , __A : Optional[Any] ) -> Dict:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(__A , __A , 0 , dtype=__A , value=__A )
a_ : Optional[int] = param_name
a_ : List[str] = model
if "." in tensor_name:
a_ : int = tensor_name.split('.' )
for split in splits[:-1]:
a_ : int = getattr(__A , __A )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
a_ : Optional[Any] = new_module
a_ : List[str] = splits[-1]
# offload weights
a_ : Union[str, Any] = False
offload_weight(module._parameters[tensor_name] , __A , __A , index=__A )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , __A , index=__A , )
else:
offload_weight(__A , __A , __A , index=__A )
offload_weight(__A , param_name.replace('weight' , 'SCB' ) , __A , index=__A )
set_module_tensor_to_device(__A , __A , 'meta' , dtype=__A , value=torch.empty(*param.size() ) )
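# Hedged usage sketch (comments only; in upstream accelerate the first function
# in this file is exposed as `load_and_quantize_model`):
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#   with init_empty_weights():
#       empty_model = MyModel()  # hypothetical model class
#   cfg = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   model = load_and_quantize_model(empty_model, bnb_quantization_config=cfg,
#                                   weights_location="weights/", device_map="auto")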
| 32 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    '''simple docstring'''
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
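# Worked check: values [60, 100, 120], weights [10, 20, 30], capacity 50.
# Items 1 and 2 fit whole (acc = [10, 30]); 20/30 of item 3 fills the rest:
# 60 + 100 + 20 * 120 / 30 = 240.0
# print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # -> 240.0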
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
'''simple docstring'''
from collections import defaultdict
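# "Even Tree": count the maximum number of edges removable from a tree rooted at
# node 1 so that every remaining component has an even number of nodes. Every
# DFS subtree of even size contributes one removable edge (its link upward).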
def dfs(start):
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree():
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1) | 270 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig( BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''
    model_type = "nat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names ) | 270 | 1 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : int , __A : Optional[int] ) -> Tuple:
"""simple docstring"""
a_ : Tuple = os.path.abspath(__A )
logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
a_ : List[str] = tf.train.list_variables(__A )
a_ : Dict = []
a_ : str = []
a_ : List[Any] = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
a_ : Union[str, Any] = full_name.split('/' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(F"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
a_ : Dict = name[1:]
# figure out how many levels deep the name is
a_ : str = 0
for _name in name:
if _name.startswith('layer_with_weights' ):
depth += 1
else:
break
layer_depth.append(__A )
# read data
a_ : Any = tf.train.load_variable(__A , __A )
names.append('/'.join(__A ) )
arrays.append(__A )
logger.info(F"""Read a total of {len(__A ):,} layers""" )
# Sanity check
if len(set(__A ) ) != 1:
raise ValueError(F"""Found layer names with different depths (layer depth {list(set(__A ) )})""" )
a_ : Union[str, Any] = list(set(__A ) )[0]
if layer_depth != 1:
raise ValueError(
'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'
' heads.' )
# convert layers
logger.info('Converting weights...' )
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
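
# Illustrative sketch (not part of the original script): reloading the dump produced above and
# checking that every converted weight was consumed. The two paths are placeholders for whatever
# --bert_config_file / --pytorch_dump_path were passed on the command line.
def sanity_check_dump(config_path, dump_path):
    import torch
    from transformers import BertConfig, BertModel

    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    state_dict = torch.load(dump_path, map_location="cpu")
    missing, unexpected = model.load_state_dict(state_dict, strict=False)
    # A clean conversion should leave no unexpected keys.
    assert not unexpected, f"unexpected keys: {unexpected}"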
| 32 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_double_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_openai_gpt_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            '''feature-extraction''': OpenAIGPTModel,
            '''text-classification''': OpenAIGPTForSequenceClassification,
            '''text-generation''': OpenAIGPTLMHeadModel,
            '''zero-shot''': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self , pipeline_test_case_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_case_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )

    def test_openai_gpt_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )

    def test_openai_gpt_double_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )

    def test_openai_gpt_classification_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class OpenAIGPTModelLanguageGenerationTest( unittest.TestCase ):
    @slow
    def test_lm_generate_openai_gpt( self ):
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
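
# Illustrative usage sketch (not part of the test file): the same greedy generation as the
# integration test above, driven through the public tokenizer API.
if __name__ == "__main__":
    from transformers import OpenAIGPTTokenizer

    tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
    model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
    inputs = tokenizer("the president is", return_tensors="pt")
    output_ids = model.generate(inputs["input_ids"], do_sample=False)  # greedy decoding
    print(tokenizer.decode(output_ids[0]))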
| 32 | 1 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
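
# Usage sketch (added for illustration; assumes scikit-learn and scipy are installed):
if __name__ == "__main__":
    import numpy as np

    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    print(simple_accuracy(preds, labels))                # 0.75
    print(glue_compute_metrics("sst-2", preds, labels))  # {'acc': 0.75}
    print(glue_compute_metrics("mrpc", preds, labels))   # accuracy, F1 and their mean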
| 359 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, "_converted_to_transformer_engine", False):
        convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


def is_port_in_use(port=None):
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
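
# Brief usage sketch (illustrative only) of two helpers defined above:
if __name__ == "__main__":
    # patch_environment temporarily sets upper-cased environment variables
    # (this assumes MASTER_PORT is not already set in the environment).
    with patch_environment(master_port="29501"):
        assert os.environ["MASTER_PORT"] == "29501"
    assert "MASTER_PORT" not in os.environ

    # merge_dicts recursively folds `source` into `destination`.
    assert merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}, "d": 3}) == {"a": {"b": 1, "c": 2}, "d": 3}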
| 322 | 0 |
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
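
# Note: the routine above applies len(data) random transpositions, which is not the textbook
# Fisher-Yates procedure and does not produce perfectly uniform permutations. The classic
# unbiased variant is sketched below for comparison.
def fisher_yates_shuffle_classic(data: list) -> list:
    # Walk from the end; swap position i with a uniformly chosen j in [0, i].
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data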
| 87 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes: Union[List[List[int]], List[List[List[int]]]] = None , word_labels: Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F''' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}''' )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
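
# Usage sketch (illustrative only; "document.png" is a placeholder path, the checkpoint is
# downloaded at runtime, and the default apply_ocr=True image processor requires pytesseract).
if __name__ == "__main__":
    from PIL import Image

    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
    image = Image.open("document.png").convert("RGB")
    encoding = processor(image, return_tensors="pt")
    print(list(encoding.keys()))  # input_ids, attention_mask, bbox, pixel_values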
| 87 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or pre_tok_state.get('''strip_accents''' , strip_accents ) != strip_accents
        ):
            pre_tok_class = getattr(normalizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''lowercase'''] = do_lower_case
            pre_tok_state['''strip_accents'''] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state )
        self.do_lower_case = do_lower_case
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['''_tokenizer'''].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        vocab = self.__dict__['''_tokenizer'''].get_vocab()
        self.__dict__['''_tokenizer'''].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
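
# Illustrative usage sketch (not part of the module); the checkpoint is one of those listed
# in the pretrained vocab map above.
if __name__ == "__main__":
    tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    print(tokenizer.tokenize("今天天气非常好。"))
    print(tokenizer("今天天气非常好。")["input_ids"])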
| 355 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''diffusers-cli command helpers''' )

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )

    # Let's go
    args = parser.parse_args()

    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )

    # Run
    service = args.func(args )
    service.run()


if __name__ == "__main__":
    main()
| 218 | 0 |
"""simple docstring"""
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 132 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig( PretrainedConfig ):
    model_type = "esm"

    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1026 , initializer_range=0.02 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''' )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , False ):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )

    def to_dict( self ):
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output['''esmfold_config'''] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__( self ):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )

    def to_dict( self ):
        output = asdict(self )
        output['''trunk'''] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__( self ):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )

        if self.max_recycles <= 0:
            raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                f''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                f''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'''
                f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'''
                f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
        if self.dropout >= 0.4:
            raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )

    def to_dict( self ):
        output = asdict(self )
        output['''structure_module'''] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1E-8
    inf: float = 1E5

    def to_dict( self ):
        return asdict(self )


def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
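
# Small sketch (added for illustration) of the TrunkConfig consistency checks defined above:
if __name__ == "__main__":
    trunk = TrunkConfig()  # defaults: 1024 = 32 heads * 32 head width
    print(trunk.sequence_state_dim // trunk.sequence_head_width)  # 32 attention heads
    try:
        TrunkConfig(sequence_state_dim=1000, sequence_head_width=32)  # 1000 % 32 != 0
    except ValueError as err:
        print(err)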
| 348 | 0 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = None
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class DDIMInverseScheduler( SchedulerMixin , ConfigMixin ):
    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps: int = 1000 , beta_start: float = 0.0001 , beta_end: float = 0.02 , beta_schedule: str = "linear" , trained_betas=None , clip_sample: bool = True , set_alpha_to_zero: bool = True , steps_offset: int = 0 , prediction_type: str = "epsilon" , clip_sample_range: float = 1.0 , **kwargs , ):
        if kwargs.get('''set_alpha_to_one''' , None ) is not None:
            deprecation_message = (
                '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
            )
            deprecate('''set_alpha_to_one''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            set_alpha_to_zero = kwargs['''set_alpha_to_one''']
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}" )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps ).copy().astype(np.int64 ) )
    def scale_model_input( self , sample , timestep=None ):
        return sample
    def set_timesteps( self , num_inference_steps , device=None ):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps." )
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round().copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        self.timesteps += self.config.steps_offset
    def step( self , model_output , timestep , sample , eta=0.0 , use_clipped_model_output=False , variance_noise=None , return_dict=True , ):
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                ''' `v_prediction`''' )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )

    def __len__( self ):
        return self.config.num_train_timesteps
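
# Illustrative inversion loop (not part of the file). The zero tensor stands in for a real
# UNet noise prediction; everything else uses the scheduler API defined above.
if __name__ == "__main__":
    scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=10)

    sample = torch.randn(1, 3, 8, 8)  # stand-in latent
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # a real model prediction would go here
        sample = scheduler.step(model_output, t, sample).prev_sample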
| 39 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__( self , claim_vector , allocated_resources_table , maximum_claim_table , ):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation( self ):
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]
    def __available_resources( self ):
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )
    def __need( self ):
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]
    def __need_index_manager( self ):
        return {self.__need().index(need ): need for need in self.__need()}
    def main( self , **kwargs ):
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('''_''' * 50 + '''\n''' )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing." )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        '''Updated available resource stack for processes: '''
                        + ''' '''.join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print('''The process is in a safe state.\n''' )
            else:
                print('''System in unsafe state. Aborting...\n''' )
                break
    def __pretty_data( self ):
        print(''' ''' * 9 + '''Allocated Resource Table''' )
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item ) + 1}"
                + ''' '''.join(f"{it:>8}" for it in item )
                + '''\n''' )
        print(''' ''' * 9 + '''System Resource Table''' )
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item ) + 1}"
                + ''' '''.join(f"{it:>8}" for it in item )
                + '''\n''' )
        print(
            '''Current Usage by Active Processes: '''
            + ''' '''.join(str(x ) for x in self.__claim_vector ) )
        print(
            '''Initial Available Resources: '''
            + ''' '''.join(str(x ) for x in self.__available_resources() ) )
        time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
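    # Demo (added for illustration): run the safety check on the example tables above.
    BankersAlgorithm(
        claim_vector=test_claim_vector,
        allocated_resources_table=test_allocated_res_table,
        maximum_claim_table=test_maximum_claim_table,
    ).main(describe=True)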
| 39 | 1 |
def solution(power: int = 1000) -> int:
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
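    # Quick check (illustrative): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
    assert solution(15) == 26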
| 7 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power( voltage: float , current: float , power: float ):
    '''simple docstring'''
    result = namedtuple("""result""" , """name value""" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("""Only one argument must be 0""" )
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""" )
elif voltage == 0:
return result("""voltage""" , power / current )
elif current == 0:
return result("""current""" , power / voltage )
elif power == 0:
return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
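    # Illustrative calls (added) covering each branch of electric_power:
    print(electric_power(voltage=0, current=2, power=5))   # result(name='voltage', value=2.5)
    print(electric_power(voltage=2, current=2, power=0))   # result(name='power', value=4.0)
    print(electric_power(voltage=-2, current=3, power=0))  # result(name='power', value=6.0)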
| 165 | 0 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 288 |
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    # The moment of each force is the cross product of its position and the force itself.
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
__lowerCAmelCase = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
__lowerCAmelCase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__lowerCAmelCase = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
__lowerCAmelCase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__lowerCAmelCase = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
__lowerCAmelCase = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 288 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_byt5''': ['''ByT5Tokenizer''']}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 22 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def UpperCAmelCase_ ( __lowercase : str ) -> str:
'''simple docstring'''
re.sub("<n>" , "" , __lowercase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__lowercase ) )
| 22 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer( Trainer ):
    def __init__( self , config=None , data_args=None , *args , **kwargs ):
        super().__init__(*args , **kwargs )

        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                F''' {self.model.__class__}'''
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
                ''' padding.''' )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self , num_training_steps: int ):
        if self.optimizer is None:
            no_decay = ['''bias''', '''LayerNorm.weight''']
            optimizer_grouped_parameters = [
                {
                    '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    '''weight_decay''': self.args.weight_decay,
                },
                {
                    '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    '''weight_decay''': 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'''scale_parameter''': False, '''relative_step''': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    '''betas''': (self.args.adam_beta1, self.args.adam_beta2),
                    '''eps''': self.args.adam_epsilon,
                }
            optimizer_kwargs['''lr'''] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
    def a ( self : int , num_training_steps : List[Any] ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
def a ( self : Union[str, Any] ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def a ( self : Optional[int] , model : str , inputs : Optional[Any] , labels : Any ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def a ( self : Tuple , model : List[str] , inputs : int ):
        labels = inputs.pop('''labels''' )
        loss , _ = self._compute_loss(model , inputs , labels )
        return loss
    def a ( self : Any , model : nn.Module , inputs : Dict[str, Union[torch.Tensor, Any]] , prediction_loss_only : bool , ignore_keys : Optional[List[str]] = None , ):
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            '''max_length''': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs['''max_length'''] )
        labels = inputs.pop('''labels''' )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs['''max_length'''] )
        return (loss, logits, labels)
    def a ( self : Union[str, Any] , tensor : Union[str, Any] , max_length : Any ):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
                F''' padded to `max_length`={max_length}''' )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
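# Worked example for the padding helper above (added; the numbers are made up):
# with pad_token_id=0 and max_length=5, a batch [[5, 6, 7], [8, 9, 10]] becomes
# [[5, 6, 7, 0, 0], [8, 9, 10, 0, 0]], so generated sequences and labels can be
# compared position-wise even when generation stopped early.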
| 86 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
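# Note (added): with the `_LazyModule` indirection above, importing the package is
# cheap; e.g. accessing `XLMRobertaModel` on the package only triggers the actual
# `modeling_xlm_roberta` import (and its torch dependency) on first attribute access.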
| 86 | 1 |
def sum_of_digits( n: int )-> int:
    """simple docstring"""
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion( n: int )-> int:
    """simple docstring"""
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def sum_of_digits_compact( n: int )-> int:
    """simple docstring"""
    return sum(int(c ) for c in str(abs(n ) ) )
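# Quick sanity check (added): all three implementations agree; for 12345 the digit
# sum is 1 + 2 + 3 + 4 + 5 == 15, so
#   sum_of_digits(12345) == sum_of_digits_recursion(-12345) == sum_of_digits_compact(12345) == 15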
def benchmark( )-> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = F"""{func.__name__}({value})"""
        timing = timeit(F"""__main__.{call}""" , setup='import __main__' )
        print(F"""{call:56} = {func(value )} -- {timing:.4f} seconds""" )
    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 39 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = TransfoXLTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
super().setUp()
_UpperCAmelCase = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCamelCase ( self , **UpperCAmelCase ):
"""simple docstring"""
        UpperCAmelCase['lower_case'] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
        input_text = '<unk> UNwanted , running'
        output_text = '<unk> unwanted, running'
return input_text, output_text
def UpperCamelCase ( self ):
"""simple docstring"""
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
        tokens = tokenizer.tokenize('<unk> UNwanted , running' )
        self.assertListEqual(tokens , ['<unk>', 'unwanted', ',', 'running'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
def UpperCamelCase ( self ):
"""simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
def UpperCamelCase ( self ):
"""simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=False )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase ( self ):
"""simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=False )
        text_in = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
        tokens_out = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
        self.assertListEqual(tokenizer.tokenize(text_in ) , tokens_out )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out ) , text_in )
def UpperCamelCase ( self ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1' , 1 )
# Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' )
| 39 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_UpperCAmelCase : Optional[int] = r"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCAmelCase)
class __lowerCAmelCase ( PretrainedConfig):
_a = "rag"
_a = True
    def __init__( self: Optional[int] , vocab_size: Optional[Any]=None , is_encoder_decoder: Union[str, Any]=True , prefix: Optional[int]=None , bos_token_id: str=None , pad_token_id: List[Any]=None , eos_token_id: Dict=None , decoder_start_token_id: Tuple=None , title_sep: List[str]=" / " , doc_sep: Union[str, Any]=" // " , n_docs: List[Any]=5 , max_combined_length: Any=3_00 , retrieval_vector_size: Union[str, Any]=7_68 , retrieval_batch_size: Optional[int]=8 , dataset: Union[str, Any]="wiki_dpr" , dataset_split: str="train" , index_name: List[Any]="compressed" , index_path: Tuple=None , passages_path: Any=None , use_dummy_dataset: Optional[Any]=False , reduce_loss: Optional[int]=False , label_smoothing: Union[str, Any]=0.0 , do_deduplication: Union[str, Any]=True , exclude_bos_score: List[str]=False , do_marginalize: List[Any]=False , output_retrieved: Optional[Any]=False , use_cache: Union[str, Any]=True , forced_eos_token_id: Optional[Any]=None , **kwargs: Union[str, Any] , ):
        super().__init__(
            bos_token_id=bos_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , is_encoder_decoder=is_encoder_decoder , prefix=prefix , vocab_size=vocab_size , **kwargs , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder" )
        question_encoder_model_type = question_encoder_config.pop("model_type" )
        decoder_config = kwargs.pop("generator" )
        decoder_model_type = decoder_config.pop("model_type" )
        from ..auto.configuration_auto import AutoConfig
        self.question_encoder = AutoConfig.for_model(question_encoder_model_type , **question_encoder_config )
        self.generator = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator , "forced_eos_token_id" , None )
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls: int , question_encoder_config: Any , generator_config: List[Any] , **kwargs: List[str] ):
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **kwargs )
    def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
        output = copy.deepcopy(self.__dict__ )
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
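# Illustrative composition sketch (added; the pretrained names are hypothetical and the
# class/method identifiers are the placeholder names used in this file):
#
#   from transformers import AutoConfig
#   qe_cfg = AutoConfig.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
#   gen_cfg = AutoConfig.from_pretrained('facebook/bart-large' )
#   rag_cfg = __lowerCAmelCase.SCREAMING_SNAKE_CASE(qe_cfg , gen_cfg , n_docs=5 )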
| 365 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( SchedulerCommonTest):
_a = (DDIMParallelScheduler,)
_a = (('''eta''', 0.0), ('''num_inference_steps''', 50))
    def SCREAMING_SNAKE_CASE ( self: Any , **kwargs: Optional[Any] ):
        config = {
            "num_train_timesteps": 10_00,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs )
        return config
    def SCREAMING_SNAKE_CASE ( self: str , **config: Any ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample , eta ).prev_sample
        return sample
    def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
        for timesteps in [1_00, 5_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def SCREAMING_SNAKE_CASE ( self: int ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
    def SCREAMING_SNAKE_CASE ( self: Tuple ):
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def SCREAMING_SNAKE_CASE ( self: int ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )
    def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
    def SCREAMING_SNAKE_CASE ( self: Dict ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def SCREAMING_SNAKE_CASE ( self: str ):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )
    def SCREAMING_SNAKE_CASE ( self: int ):
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
            self.check_over_forward(time_step=t , num_inference_steps=num_inference_steps )
    def SCREAMING_SNAKE_CASE ( self: str ):
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t , eta=eta )
def SCREAMING_SNAKE_CASE ( self: str ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5
    def SCREAMING_SNAKE_CASE ( self: List[str] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        samplea = self.dummy_sample_deter
        sampleb = self.dummy_sample_deter + 0.1
        samplec = self.dummy_sample_deter - 0.1
        per_sample_batch = samplea.shape[0]
        samples = torch.stack([samplea, sampleb, samplec] , dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
        assert abs(result_mean.item() - 0.49_82 ) < 1e-3
    def SCREAMING_SNAKE_CASE ( self: List[Any] ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
        assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
    def SCREAMING_SNAKE_CASE ( self: Any ):
        sample = self.full_loop(prediction_type="v_prediction" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 52.53_02 ) < 1e-2
        assert abs(result_mean.item() - 0.06_84 ) < 1e-3
    def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
        assert abs(result_mean.item() - 0.19_51 ) < 1e-3
    def SCREAMING_SNAKE_CASE ( self: Any ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
        assert abs(result_mean.item() - 0.19_41 ) < 1e-3
| 158 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class lowerCAmelCase ( ProcessorMixin ):
lowerCAmelCase_ = "WhisperFeatureExtractor"
lowerCAmelCase_ = "WhisperTokenizer"
    def __init__( self : Any , feature_extractor : Optional[Any] , tokenizer : Tuple ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def snake_case ( self : List[Any] , task : Optional[int]=None , language : Any=None , no_timestamps : Tuple=True ):
        """simple docstring"""
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )
    def __call__( self : List[Any] , *args : Tuple , **kwargs : Dict ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        audio = kwargs.pop('audio' , None )
        sampling_rate = kwargs.pop('sampling_rate' , None )
        text = kwargs.pop('text' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def snake_case ( self : List[Any] , *args : Dict , **kwargs : Tuple ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def snake_case ( self : Union[str, Any] , *args : str , **kwargs : str ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    def snake_case ( self : str , text : str , return_tensors : Dict="np" ):
        """simple docstring"""
        return self.tokenizer.get_prompt_ids(text , return_tensors=return_tensors )
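# Illustrative usage sketch (added; the checkpoint name and waveform are hypothetical):
#
#   processor = lowerCAmelCase.from_pretrained('openai/whisper-tiny' )   # the class above
#   inputs = processor(audio=waveform , sampling_rate=16_000 )           # -> log-mel features
#   labels = processor(text='a transcript' )['input_ids']                # -> token ids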
| 141 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase ( pt_model : int, model_file : str ):
    '''simple docstring'''
    try:
        with open(model_file, 'rb' ) as flax_state_f:
            flax_state = from_bytes(pt_model, flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(model_file ) as f:
                if f.read().startswith('version' ):
                    raise OSError(
                        'You seem to have cloned a repository without having git-lfs installed. Please'
                        ' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
                        ' folder you cloned.' )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(F'''Unable to convert {model_file} to Flax deserializable object. ''' )
    return load_flax_weights_in_pytorch_model(pt_model, flax_state )
def load_flax_weights_in_pytorch_model( pt_model : List[str], flax_state : List[str] ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16, flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params, flax_state )
    pt_model.base_model_prefix = ''
    flax_state_dict = flatten_dict(flax_state, sep='.' )
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split('.' )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace('_0', '.0' )
                    .replace('_1', '.1' )
                    .replace('_2', '.2' )
                    .replace('_3', '.3' )
                    .replace('_4', '.4' )
                    .replace('_5', '.5' )
                    .replace('_6', '.6' )
                    .replace('_7', '.7' )
                    .replace('_8', '.8' )
                    .replace('_9', '.9' )
                )
        flax_key = '.'.join(flax_key_tuple_array )
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
                    F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor, np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
# re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
    if len(missing_keys ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
' use it for predictions and inference.' )
return pt_model
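# Shape convention illustrated (added, for the 4-d branch above): Flax stores conv
# kernels as (H, W, in_channels, out_channels) while PyTorch expects
# (out_channels, in_channels, H, W), hence the (3, 2, 0, 1) transpose; plain
# Dense/Linear kernels only need the `.T`.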
| 141 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''
    data : int
    left : TreeNode | None = None
    right : TreeNode | None = None
lowercase__ = namedtuple("""CoinsDistribResult""", """moves excess""")
def __lowerCamelCase ( root ) -> int:
    """simple docstring"""
    if root is None:
        return 0
    # Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError("The nodes number should be same as the number of coins" )
    # Main calculation
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
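# Worked example (added): for the tree
#       3
#      / \
#     0   0
# two moves push one coin from the root to each child, so the function above returns 2:
#
#   _demo_root = TreeNode(3 , TreeNode(0 ) , TreeNode(0 ) )
#   assert __lowerCamelCase(_demo_root ) == 2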
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"""vocab_file""": """vocab.txt"""}
lowercase__ = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
lowercase__ = {
"""openbmb/cpm-ant-10b""": 1024,
}
def load_vocab( vocab_file ) -> Dict:
    """simple docstring"""
    vocab = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("\n" )
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object ):
'''simple docstring'''
    def __init__( self : Tuple , vocab : Dict , unk_token : Optional[Any]="<unk>" , max_input_chars_per_word : List[str]=2_00 ):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def lowerCamelCase ( self : Any , token : Optional[int] ):
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class __lowerCamelCase ( PreTrainedTokenizer ):
'''simple docstring'''
a_ : str = VOCAB_FILES_NAMES
a_ : int = PRETRAINED_VOCAB_FILES_MAP
a_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] = ["""input_ids""", """attention_mask"""]
a_ : Union[str, Any] = False
    def __init__( self : Union[str, Any] , vocab_file : List[str] , bod_token : Dict="<d>" , eod_token : Tuple="</d>" , bos_token : Tuple="<s>" , eos_token : int="</s>" , pad_token : Tuple="<pad>" , unk_token : Dict="<unk>" , line_token : Any="</n>" , space_token : Optional[int]="</_>" , padding_side : List[Any]="left" , **kwargs : List[Any] , ):
requires_backends(self , ["jieba"] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file )
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowerCamelCase ( self : List[Any] ):
return self.encoder[self.bod_token]
@property
def lowerCamelCase ( self : List[str] ):
return self.encoder[self.eod_token]
@property
def lowerCamelCase ( self : int ):
return self.encoder["\n"]
@property
def lowerCamelCase ( self : Tuple ):
return len(self.encoder )
def lowerCamelCase ( self : Optional[int] ):
return dict(self.encoder , **self.added_tokens_encoder )
    def lowerCamelCase ( self : Optional[int] , text : Any ):
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens
    def lowerCamelCase ( self : Optional[Any] , token_ids : List[str] , **kwargs : Tuple ):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )
    def lowerCamelCase ( self : Optional[Any] , token : Union[str, Any] ):
        return token in self.encoder
    def lowerCamelCase ( self : List[Any] , tokens : List[str] ):
        return "".join(tokens )
    def lowerCamelCase ( self : Union[str, Any] , token : str ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def lowerCamelCase ( self : Union[str, Any] , index : int ):
        return self.decoder.get(index , self.unk_token )
    def lowerCamelCase ( self : List[str] , save_directory : str , filename_prefix : Optional[str] = None ):
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(token + "\n" )
                index += 1
        return (vocab_file,)
    def lowerCamelCase ( self : int , token_ids_a : List[int] , token_ids_b : List[int] = None ):
        if token_ids_b is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_b
    def lowerCamelCase ( self : List[str] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=already_has_special_tokens )
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b ))
        return [1] + ([0] * len(token_ids_a ))
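# Greedy longest-match illustration for the WordpieceTokenizer above (added; the tiny
# vocabulary is made up):
#
#   wp = WordpieceTokenizer(vocab={'un': 0, 'want': 1, 'wanted': 2, 'ed': 3} , unk_token='<unk>' )
#   wp.lowerCamelCase('unwanted' )   # -> ['un', 'wanted']  (longest prefix wins at each step)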
| 161 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class lowerCAmelCase_:
'''simple docstring'''
    def __init__( self ,num_of_nodes ) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges : list[list[int]] = []
        self.m_component : dict[int, int] = {}
    def add_edge( self ,u_node ,v_node ,weight ) -> None:
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self ,u_node ) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self ,u_node ) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self ,component_size ,u_node ,v_node ) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def find_minimum_spanning_tree( self ) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge : list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u , v , w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge ,list ):
                    u , v , w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size ,u_component ,v_component )
                        print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def _SCREAMING_SNAKE_CASE ( ):
    """simple docstring"""


if __name__ == "__main__":
    import doctest
    doctest.testmod()
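# Illustrative usage (added; the 3-node triangle below is made up):
#
#   g = lowerCAmelCase_(3 )      # the graph class defined above
#   g.add_edge(0 , 1 , 5 )
#   g.add_edge(1 , 2 , 1 )
#   g.add_edge(2 , 0 , 3 )
#   g.find_minimum_spanning_tree()   # keeps the weight-1 and weight-3 edges (total 4)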
| 37 |
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__A = logging.get_logger(__name__)
enable_full_determinism()
class _snake_case ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
snake_case__ = UNetaDModel
snake_case__ = "sample"
@property
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : int = 4
__lowerCamelCase : List[Any] = 3
__lowerCamelCase : List[Any] = (32, 32)
__lowerCamelCase : Any = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
__lowerCamelCase : List[Any] = torch.tensor([10] ).to(UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__ ( self : int ):
return (3, 32, 32)
@property
def lowerCamelCase__ ( self : Tuple ):
return (3, 32, 32)
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Tuple = {
"block_out_channels": (32, 64),
"down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
"up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
"attention_head_dim": 3,
"out_channels": 3,
"in_channels": 3,
"layers_per_block": 2,
"sample_size": 32,
}
__lowerCamelCase : Tuple = self.dummy_input
return init_dict, inputs_dict
class _snake_case ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
snake_case__ = UNetaDModel
snake_case__ = "sample"
@property
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Optional[Any] = 4
__lowerCamelCase : int = 4
__lowerCamelCase : Optional[int] = (32, 32)
__lowerCamelCase : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = torch.tensor([10] ).to(UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
return (4, 32, 32)
@property
def lowerCamelCase__ ( self : Any ):
return (4, 32, 32)
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase : Dict = {
"sample_size": 32,
"in_channels": 4,
"out_channels": 4,
"layers_per_block": 2,
"block_out_channels": (32, 64),
"attention_head_dim": 32,
"down_block_types": ("DownBlock2D", "DownBlock2D"),
"up_block_types": ("UpBlock2D", "UpBlock2D"),
}
__lowerCamelCase : Dict = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase , __lowerCamelCase : Tuple = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(UpperCAmelCase )
__lowerCamelCase : Any = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def lowerCamelCase__ ( self : Optional[int] ):
__lowerCamelCase , __lowerCamelCase : List[Any] = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase )
model.to(UpperCAmelCase )
__lowerCamelCase : int = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def lowerCamelCase__ ( self : Tuple ):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
__lowerCamelCase , __lowerCamelCase : Optional[int] = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase )
model_accelerate.to(UpperCAmelCase )
model_accelerate.eval()
__lowerCamelCase : List[Any] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
__lowerCamelCase : int = noise.to(UpperCAmelCase )
__lowerCamelCase : Optional[int] = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase )
__lowerCamelCase : int = model_accelerate(UpperCAmelCase , UpperCAmelCase )["sample"]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
__lowerCamelCase , __lowerCamelCase : str = UNetaDModel.from_pretrained(
"fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase , low_cpu_mem_usage=UpperCAmelCase )
model_normal_load.to(UpperCAmelCase )
model_normal_load.eval()
__lowerCamelCase : Any = model_normal_load(UpperCAmelCase , UpperCAmelCase )["sample"]
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1E-3 )
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : Tuple = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" )
model.eval()
model.to(UpperCAmelCase )
__lowerCamelCase : str = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__lowerCamelCase : Optional[int] = noise.to(UpperCAmelCase )
__lowerCamelCase : Tuple = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase )
with torch.no_grad():
__lowerCamelCase : Optional[Any] = model(UpperCAmelCase , UpperCAmelCase ).sample
__lowerCamelCase : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
__lowerCamelCase : Optional[Any] = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1E-3 ) )
class _snake_case ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
snake_case__ = UNetaDModel
snake_case__ = "sample"
@property
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : Optional[Any]=(32, 32) ):
__lowerCamelCase : Tuple = 4
__lowerCamelCase : int = 3
__lowerCamelCase : Dict = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
__lowerCamelCase : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__ ( self : Optional[Any] ):
return (3, 32, 32)
@property
def lowerCamelCase__ ( self : Optional[Any] ):
return (3, 32, 32)
def lowerCamelCase__ ( self : int ):
__lowerCamelCase : Union[str, Any] = {
"block_out_channels": [32, 64, 64, 64],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1E-6,
"mid_block_scale_factor": math.sqrt(2.0 ),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
__lowerCamelCase : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase , __lowerCamelCase : List[str] = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(UpperCAmelCase )
__lowerCamelCase : int = self.dummy_input
__lowerCamelCase : int = floats_tensor((4, 3) + (256, 256) ).to(UpperCAmelCase )
__lowerCamelCase : int = noise
__lowerCamelCase : int = model(**UpperCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase : int = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" )
model.to(UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = 4
__lowerCamelCase : List[Any] = 3
__lowerCamelCase : Tuple = (256, 256)
__lowerCamelCase : int = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
__lowerCamelCase : List[Any] = torch.tensor(batch_size * [1E-4] ).to(UpperCAmelCase )
with torch.no_grad():
__lowerCamelCase : Tuple = model(UpperCAmelCase , UpperCAmelCase ).sample
__lowerCamelCase : Optional[int] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
__lowerCamelCase : List[Any] = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1E-2 ) )
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase : str = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" )
model.to(UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = 4
__lowerCamelCase : Any = 3
__lowerCamelCase : Union[str, Any] = (32, 32)
__lowerCamelCase : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
__lowerCamelCase : Dict = torch.tensor(batch_size * [1E-4] ).to(UpperCAmelCase )
with torch.no_grad():
__lowerCamelCase : List[str] = model(UpperCAmelCase , UpperCAmelCase ).sample
__lowerCamelCase : int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
__lowerCamelCase : int = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1E-2 ) )
def lowerCamelCase__ ( self : int ):
# not required for this model
        pass
| 135 | 0 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
class a__ ( ModelMixin ):
    def __init__( self : str , controlnets : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
        """simple docstring"""
        super().__init__()
        self.nets = nn.ModuleList(controlnets )
    def SCREAMING_SNAKE_CASE__ ( self : Tuple , sample : torch.FloatTensor , timestep : Union[torch.Tensor, float, int] , encoder_hidden_states : torch.Tensor , controlnet_cond : List[torch.tensor] , conditioning_scale : List[float] , class_labels : Optional[torch.Tensor] = None , timestep_cond : Optional[torch.Tensor] = None , attention_mask : Optional[torch.Tensor] = None , cross_attention_kwargs : Optional[Dict[str, Any]] = None , guess_mode : bool = False , return_dict : bool = True , ):
        """simple docstring"""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples , mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )
            # merge samples
            if i == 0:
                down_block_res_samples , mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def SCREAMING_SNAKE_CASE__ ( self : List[str] , save_directory : Union[str, os.PathLike] , is_main_process : bool = True , save_function : Callable = None , safe_serialization : bool = False , variant : Optional[str] = None , ):
        """simple docstring"""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + f"""_{idx}"""
    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , pretrained_model_path : Optional[Union[str, os.PathLike]] , **kwargs : Optional[int] ):
        """simple docstring"""
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + f"""_{idx}"""
        logger.info(f"""{len(controlnets )} controlnets loaded from {pretrained_model_path}.""" )
        if len(controlnets ) == 0:
            raise ValueError(
                f"""No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}.""" )
        return cls(controlnets )
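# Illustrative usage sketch (added; the checkpoints and scales are hypothetical):
#
#   controlnet_pose = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-openpose' )
#   controlnet_canny = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
#   multi = a__([controlnet_pose, controlnet_canny] )   # the class above
#   # forward then takes one conditioning image and one scale per net, and the
#   # per-net residuals are summed before being fed to the UNet.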
| 350 |
'''simple docstring'''
def __lowerCAmelCase ( principal , rate_per_annum , years_to_repay ) -> float:
    if principal <= 0:
        raise Exception('''Principal borrowed must be > 0''' )
    if rate_per_annum < 0:
        raise Exception('''Rate of interest must be >= 0''' )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception('''Years to repay must be an integer > 0''' )
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
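# Worked example (added): borrowing 25_000 at 12% per annum over 3 years gives a
# monthly rate of 0.01 and 36 payments, so the EMI is
#   25_000 * 0.01 * 1.01**36 / (1.01**36 - 1) ~= 830.36
#
#   assert abs(__lowerCAmelCase(25_000 , 0.12 , 3 ) - 830.36 ) < 0.01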
if __name__ == "__main__":
import doctest
doctest.testmod()
| 237 | 0 |
'''simple docstring'''
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Simulate a quantum half adder and return the measurement counts."""
    simulator = qiskit.Aer.get_backend('aer_simulator')
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
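# Quick exhaustive check (a sketch; assumes a local qiskit Aer install). For each
# input pair the dominant count key should be "<AND bit><XOR bit>", i.e. the
# two-bit binary sum of the inputs (1 + 1 -> "10").
def _demo_half_adder() -> None:
    for bit0 in (0, 1):
        for bit1 in (0, 1):
            print(bit0, bit1, half_adder(bit0, bit1))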
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}") | 331 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True) | 331 | 1 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops_cpu(self):
        debug_launcher(test_ops.main)
| 364 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
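# Usage sketch (illustrative; fetching a hub checkpoint needs network access and
# the checkpoint name is only an example):
#   config = get_image_processor_config("google/vit-base-patch16-224")
#   config.get("image_processor_type")  # e.g. "ViTImageProcessor"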
class AutoImageProcessor:
    r"""
    Generic image processor class instantiated via the `AutoImageProcessor.from_pretrained` class method;
    it cannot be instantiated directly.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor for a config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
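# A minimal usage sketch (an illustration, not part of the original module; the
# checkpoint name is only an example and loading it requires network access):
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   print(type(image_processor).__name__)  # e.g. "ViTImageProcessor"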
| 189 | 0 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14, label_length=10, cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], moving_average=25, autocorrelation_factor=5):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                _, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim]
            )
            out_len = len(outputs)

            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim]
            )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 114 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
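# A minimal concrete reader (an illustrative sketch, not part of the original
# module): it shows the shape of a subclass, with `read` materializing rows via
# `Dataset.from_list` (available in recent `datasets` releases).
class InMemoryListReader(AbstractDatasetReader):
    def __init__(self, rows, **kwargs):
        super().__init__(path_or_paths=None, **kwargs)
        self.rows = rows

    def read(self) -> Dataset:
        return Dataset.from_list(self.rows)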
| 114 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowercase : Tuple = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
lowercase : str = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
lowercase : Any = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )
    def _compute(self, predictions, references, min_len: int = 1, max_len: int = 4):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
}
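# A hand-rolled sketch of the per-sentence GLEU idea described in `_DESCRIPTION`
# (for illustration only; the metric itself defers to `nltk.translate.gleu_score`):
def _sentence_gleu_sketch(hypothesis, reference, min_len=1, max_len=4):
    from collections import Counter

    def ngrams(tokens, n):
        return Counter(tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1))

    hyp_counts, ref_counts = Counter(), Counter()
    for n in range(min_len, max_len + 1):
        hyp_counts += ngrams(hypothesis, n)
        ref_counts += ngrams(reference, n)
    overlap = sum((hyp_counts & ref_counts).values())
    precision = overlap / max(sum(hyp_counts.values()), 1)
    recall = overlap / max(sum(ref_counts.values()), 1)
    # GLEU is the minimum of n-gram recall and precision
    return min(precision, recall)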
| 285 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
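# Example of the resulting header (a sketch; the exact fields depend on the
# installed backends and the environment):
#   http_user_agent({"pipeline_class": "StableDiffusionPipeline"})
#   -> "diffusers/<version>; python/<version>; session_id/<hex>; torch/<version>; pipeline_class/StableDiffusionPipeline"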
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[]
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
lowercase : int = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
"""the directory exists and can be written to."""
)
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> str:
if variant is not None:
lowercase : List[str] = weights_name.split(""".""" )
lowercase : Optional[Any] = splits[:-1] + [variant] + splits[-1:]
lowercase : int = """.""".join(SCREAMING_SNAKE_CASE__ )
return weights_name
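# Illustrative sketch (added): the intended behaviour of the variant helper above
# on a hypothetical weights file name; `_add_variant` is the name used at its
# call sites below.
def _demo_add_variant():
    assert _add_variant("""diffusion_pytorch_model.bin""" , """fp16""" ) == "diffusion_pytorch_model.fp16.bin"
    assert _add_variant("""model.safetensors""" ) == "model.safetensors"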
def _snake_case( SCREAMING_SNAKE_CASE__ , *,
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , ) -> Optional[Any]:
lowercase : Optional[int] = str(SCREAMING_SNAKE_CASE__ )
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
return pretrained_model_name_or_path
elif os.path.isdir(SCREAMING_SNAKE_CASE__ ):
if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
# Load from a PyTorch checkpoint
lowercase : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
lowercase : Any = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model_file
else:
raise EnvironmentError(
f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(SCREAMING_SNAKE_CASE__ ).base_version ) >= version.parse("""0.20.0""" )
):
try:
lowercase : Any = hf_hub_download(
SCREAMING_SNAKE_CASE__ , filename=_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
warnings.warn(
f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , SCREAMING_SNAKE_CASE__ , )
return model_file
except: # noqa: E722
warnings.warn(
f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}' so that the correct variant file can be added." , SCREAMING_SNAKE_CASE__ , )
try:
# 2. Load model file as usual
lowercase : int = hf_hub_download(
SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
"""listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
"""this model name. Check the model page at """
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
f" directory containing a file named {weights_name} or"
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" )
except EnvironmentError:
raise EnvironmentError(
f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"""'https://huggingface.co/models', make sure you don't have a local directory with the same name. """
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a file named {weights_name}" )
| 285 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def __snake_case( _lowerCAmelCase ) -> Any:
if "emb" in name:
snake_case__ : int = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
snake_case__ : int = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
snake_case__ : Optional[int] = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
snake_case__ : Union[str, Any] = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
snake_case__ : List[Any] = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
snake_case__ : int = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
snake_case__ : Any = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
snake_case__ : int = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
snake_case__ : str = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
snake_case__ : Tuple = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
snake_case__ : int = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
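# Illustrative sketch (added): the intended effect of the replacements above on a
# hypothetical fairseq key; `rename_keys` is the name used at the call site below.
def _demo_rename_keys():
    assert rename_keys("""transformer.layers.0.linear1.weight""" ) == "model.decoder.layers.0.fc1.weight"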
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple[Dict, Dict]:
snake_case__ : Any = list(state_dict.keys() )
snake_case__ : Tuple = {}
for key in keys:
snake_case__ : Tuple = state_dict.pop(_lowerCAmelCase )
snake_case__ : List[Any] = rename_keys(_lowerCAmelCase )
if "in_proj_weight" in key:
# split fused qkv proj
snake_case__ : List[Any] = val[:hidden_size, :]
snake_case__ : List[Any] = val[hidden_size : 2 * hidden_size, :]
snake_case__ : Dict = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
snake_case__ : Union[str, Any] = val
else:
snake_case__ : int = val
return state_dict, enc_dec_proj_state_dict
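# Note (added): the fused `in_proj_weight` split above assumes a tensor of shape
# (3 * hidden_size, hidden_size); the three equal slices are the query, key and
# value projections, in that order.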
def __snake_case( _lowerCAmelCase ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
snake_case__ : Dict = 1_024
snake_case__ : Tuple = 24
snake_case__ : int = 16
elif checkpoint == "medium":
snake_case__ : List[str] = 1_536
snake_case__ : List[Any] = 48
snake_case__ : int = 24
elif checkpoint == "large":
snake_case__ : Optional[Any] = 2_048
snake_case__ : Optional[int] = 48
snake_case__ : List[Any] = 32
else:
raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
snake_case__ : List[Any] = MusicgenDecoderConfig(
hidden_size=_lowerCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=_lowerCAmelCase , num_attention_heads=_lowerCAmelCase , )
return config
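# Illustrative sketch (added): the intended mapping for the "small" checkpoint, a
# 1024-dim, 24-layer, 16-head decoder; `decoder_config_from_checkpoint` is the
# name used at the call site below.
def _demo_decoder_config():
    config = decoder_config_from_checkpoint("""small""" )
    assert (config.hidden_size, config.num_hidden_layers, config.num_attention_heads) == (1_024, 24, 16)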
@torch.no_grad()
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="cpu" ) -> Any:
snake_case__ : List[str] = MusicGen.get_pretrained(_lowerCAmelCase , device=_lowerCAmelCase )
snake_case__ : Any = decoder_config_from_checkpoint(_lowerCAmelCase )
snake_case__ : int = fairseq_model.lm.state_dict()
snake_case__ , snake_case__ : List[Any] = rename_state_dict(
_lowerCAmelCase , hidden_size=decoder_config.hidden_size )
snake_case__ : int = TaEncoderModel.from_pretrained("""t5-base""" )
snake_case__ : Dict = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
snake_case__ : str = MusicgenForCausalLM(_lowerCAmelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
snake_case__ , snake_case__ : Tuple = decoder.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
raise ValueError(f"Missing key(s) in state_dict: {missing_keys}" )
if len(_lowerCAmelCase ) > 0:
raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
snake_case__ : Tuple = MusicgenForConditionalGeneration(text_encoder=_lowerCAmelCase , audio_encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_lowerCAmelCase )
# check we can do a forward pass
snake_case__ : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
snake_case__ : List[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
snake_case__ : Optional[int] = model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained("""t5-base""" )
snake_case__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
snake_case__ : Tuple = MusicgenProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
# set the appropriate bos/pad token ids
snake_case__ : Dict = 2_048
snake_case__ : Optional[int] = 2_048
# set other default generation config params
snake_case__ : Tuple = int(30 * audio_encoder.config.frame_rate )
snake_case__ : Tuple = True
snake_case__ : Tuple = 3.0
if pytorch_dump_folder is not None:
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
if repo_id:
logger.info(f"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(_lowerCAmelCase )
processor.push_to_hub(_lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 35 |
'''simple docstring'''
import argparse
import os
import re
__a = "src/transformers"
# Pattern that looks at the indentation in a line.
__a = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
__a = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__a = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
__a = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__a = re.compile(R"\[([^\]]+)\]")
def __snake_case( _lowerCAmelCase ) -> List[Any]:
snake_case__ : int = _re_indent.search(_lowerCAmelCase )
return "" if search is None else search.groups()[0]
def __snake_case( _lowerCAmelCase , _lowerCAmelCase="" , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]:
snake_case__ : str = 0
snake_case__ : Union[str, Any] = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCAmelCase ):
index += 1
snake_case__ : Tuple = ["""\n""".join(lines[:index] )]
else:
snake_case__ : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
snake_case__ : Optional[int] = [lines[index]]
index += 1
while index < len(_lowerCAmelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCAmelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCAmelCase ) )
if index < len(_lowerCAmelCase ) - 1:
snake_case__ : str = [lines[index + 1]]
index += 1
else:
snake_case__ : int = []
else:
blocks.append("""\n""".join(_lowerCAmelCase ) )
snake_case__ : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCAmelCase ) > 0:
blocks.append("""\n""".join(_lowerCAmelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCAmelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def __snake_case( _lowerCAmelCase ) -> Tuple:
def _inner(_lowerCAmelCase ):
return key(_lowerCAmelCase ).lower().replace("""_""" , """""" )
return _inner
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=None ) -> List[Any]:
# If no key is provided, we use a noop.
def noop(_lowerCAmelCase ):
return x
if key is None:
snake_case__ : Optional[int] = noop
# Constants are all uppercase, they go first.
snake_case__ : Optional[int] = [obj for obj in objects if key(_lowerCAmelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
snake_case__ : int = [obj for obj in objects if key(_lowerCAmelCase )[0].isupper() and not key(_lowerCAmelCase ).isupper()]
# Functions begin with a lowercase, they go last.
snake_case__ : str = [obj for obj in objects if not key(_lowerCAmelCase )[0].isupper()]
snake_case__ : List[str] = ignore_underscore(_lowerCAmelCase )
return sorted(_lowerCAmelCase , key=_lowerCAmelCase ) + sorted(_lowerCAmelCase , key=_lowerCAmelCase ) + sorted(_lowerCAmelCase , key=_lowerCAmelCase )
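# Illustrative sketch (added): the intended ordering; constants first, classes
# second, functions last, each group alphabetised ignoring leading underscores
# (`sort_objects` is the name used at the call sites below).
def _demo_sort_objects():
    assert sort_objects(["foo", "CONSTANT", "Bar", "_private"] ) == ["CONSTANT", "Bar", "foo", "_private"]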
def __snake_case( _lowerCAmelCase ) -> int:
# This inner function sort imports between [ ].
def _replace(_lowerCAmelCase ):
snake_case__ : Union[str, Any] = match.groups()[0]
if "," not in imports:
return f"[{imports}]"
snake_case__ : int = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
snake_case__ : List[str] = keys[:-1]
return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(_lowerCAmelCase )] ) + "]"
snake_case__ : str = import_statement.split("""\n""" )
if len(_lowerCAmelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
snake_case__ : Dict = 2 if lines[1].strip() == """[""" else 1
snake_case__ : str = [(i, _re_strip_line.search(_lowerCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
snake_case__ : str = sort_objects(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[1] )
snake_case__ : Union[str, Any] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_lowerCAmelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
snake_case__ : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
snake_case__ : List[Any] = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
snake_case__ : List[str] = keys[:-1]
snake_case__ : int = get_indent(lines[1] ) + """, """.join([f"\"{k}\"" for k in sort_objects(_lowerCAmelCase )] )
return "\n".join(_lowerCAmelCase )
else:
# Finally we have to deal with imports fitting on one line
snake_case__ : Optional[Any] = _re_bracket_content.sub(_replace , _lowerCAmelCase )
return import_statement
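# Illustrative sketch (added): the intended behaviour on a hypothetical one-line
# entry (`sort_objects_in_import` is the name used at the call site below).
def _demo_sort_objects_in_import():
    statement = '_import_structure["models"] = ["b", "a"]'
    assert sort_objects_in_import(statement ) == '_import_structure["models"] = ["a", "b"]'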
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=True ) -> Dict:
with open(_lowerCAmelCase , encoding="""utf-8""" ) as f:
snake_case__ : Optional[int] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
snake_case__ : Optional[int] = split_code_in_indented_blocks(
_lowerCAmelCase , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_lowerCAmelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
snake_case__ : Optional[Any] = main_blocks[block_idx]
snake_case__ : Dict = block.split("""\n""" )
# Get to the start of the imports.
snake_case__ : Dict = 0
while line_idx < len(_lowerCAmelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
snake_case__ : Union[str, Any] = len(_lowerCAmelCase )
else:
line_idx += 1
if line_idx >= len(_lowerCAmelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
snake_case__ : List[str] = """\n""".join(block_lines[line_idx:-1] )
snake_case__ : str = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
snake_case__ : Optional[int] = split_code_in_indented_blocks(_lowerCAmelCase , indent_level=_lowerCAmelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
snake_case__ : Tuple = _re_direct_key if """_import_structure = {""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
snake_case__ : Optional[Any] = [(pattern.search(_lowerCAmelCase ).groups()[0] if pattern.search(_lowerCAmelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
snake_case__ : Dict = [(i, key) for i, key in enumerate(_lowerCAmelCase ) if key is not None]
snake_case__ : Union[str, Any] = [x[0] for x in sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
snake_case__ : List[Any] = 0
snake_case__ : Optional[Any] = []
for i in range(len(_lowerCAmelCase ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
snake_case__ : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(_lowerCAmelCase )
count += 1
# And we put our main block back together with its first and last line.
snake_case__ : Dict = """\n""".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(_lowerCAmelCase ):
if check_only:
return True
else:
print(f"Overwriting {file}." )
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write("""\n""".join(_lowerCAmelCase ) )
def __snake_case( _lowerCAmelCase=True ) -> Tuple:
snake_case__ : str = []
for root, _, files in os.walk(_lowerCAmelCase ):
if "__init__.py" in files:
snake_case__ : Union[str, Any] = sort_imports(os.path.join(_lowerCAmelCase , """__init__.py""" ) , check_only=_lowerCAmelCase )
if result:
snake_case__ : Union[str, Any] = [os.path.join(_lowerCAmelCase , """__init__.py""" )]
if len(_lowerCAmelCase ) > 0:
raise ValueError(f"Would overwrite {len(_lowerCAmelCase )} files, run `make style`." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 35 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def lowerCAmelCase_ ( _snake_case : Union[str, Any] ) -> Dict:
'''simple docstring'''
__magic_name__ : Tuple = SwinConfig(image_size=192 )
if "base" in model_name:
__magic_name__ : Optional[int] = 6
__magic_name__ : Dict = 128
__magic_name__ : Any = (2, 2, 18, 2)
__magic_name__ : Tuple = (4, 8, 16, 32)
elif "large" in model_name:
__magic_name__ : Optional[Any] = 12
__magic_name__ : List[Any] = 192
__magic_name__ : Tuple = (2, 2, 18, 2)
__magic_name__ : List[Any] = (6, 12, 24, 48)
else:
raise ValueError("Model not supported, only supports base and large variants" )
__magic_name__ : Optional[int] = window_size
__magic_name__ : Any = embed_dim
__magic_name__ : List[str] = depths
__magic_name__ : int = num_heads
return config
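# Illustrative sketch (added): the intended config for the base variant, a
# 128-dim embedding with window size 6 (`get_swin_config` is the name used at the
# call site below).
def _demo_get_swin_config():
    config = get_swin_config("""swin-base-simmim-window6-192""" )
    assert (config.embed_dim, config.window_size) == (128, 6)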
def lowerCAmelCase_ ( _snake_case : Union[str, Any] ) -> Any:
'''simple docstring'''
if "encoder.mask_token" in name:
__magic_name__ : Tuple = name.replace("encoder.mask_token" , "embeddings.mask_token" )
if "encoder.patch_embed.proj" in name:
__magic_name__ : Union[str, Any] = name.replace("encoder.patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "encoder.patch_embed.norm" in name:
__magic_name__ : List[str] = name.replace("encoder.patch_embed.norm" , "embeddings.norm" )
if "attn.proj" in name:
__magic_name__ : List[str] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
__magic_name__ : List[str] = name.replace("attn" , "attention.self" )
if "norm1" in name:
__magic_name__ : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__magic_name__ : Optional[Any] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__magic_name__ : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__magic_name__ : Optional[int] = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
__magic_name__ : Union[str, Any] = "layernorm.weight"
if name == "encoder.norm.bias":
__magic_name__ : Optional[Any] = "layernorm.bias"
if "decoder" in name:
pass
else:
__magic_name__ : List[str] = "swin." + name
return name
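# Worked example (added): tracing the replacements above, a hypothetical SimMIM
# key "encoder.patch_embed.proj.weight" becomes
# "swin.embeddings.patch_embeddings.projection.weight"; the projection rename
# plus the "swin." prefix added in the final else-branch.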
def lowerCAmelCase_ ( _snake_case : Optional[int] , _snake_case : List[Any] ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__magic_name__ : Tuple = orig_state_dict.pop(_snake_case )
if "attn_mask" in key:
pass
elif "qkv" in key:
__magic_name__ : str = key.split("." )
__magic_name__ : Tuple = int(key_split[2] )
__magic_name__ : List[str] = int(key_split[4] )
__magic_name__ : Any = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__magic_name__ : List[Any] = val[:dim, :]
__magic_name__ : Dict = val[
dim : dim * 2, :
]
__magic_name__ : List[str] = val[-dim:, :]
else:
__magic_name__ : Tuple = val[
:dim
]
__magic_name__ : str = val[
dim : dim * 2
]
__magic_name__ : Dict = val[
-dim:
]
else:
__magic_name__ : List[Any] = val
return orig_state_dict
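# Note (added): the fused Swin `qkv` tensors above have shape (3 * dim, dim) for
# weights and (3 * dim,) for biases; the slices [:dim], [dim : 2 * dim] and
# [-dim:] are the query, key and value blocks, in that order.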
def lowerCAmelCase_ ( _snake_case : str , _snake_case : int , _snake_case : List[str] , _snake_case : Tuple ) -> Any:
'''simple docstring'''
__magic_name__ : Tuple = torch.load(_snake_case , map_location="cpu" )["model"]
__magic_name__ : List[str] = get_swin_config(_snake_case )
__magic_name__ : int = SwinForMaskedImageModeling(_snake_case )
model.eval()
__magic_name__ : Tuple = convert_state_dict(_snake_case , _snake_case )
model.load_state_dict(_snake_case )
__magic_name__ : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
__magic_name__ : int = ViTImageProcessor(size={"height": 192, "width": 192} )
__magic_name__ : Union[str, Any] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
__magic_name__ : int = image_processor(images=_snake_case , return_tensors="pt" )
with torch.no_grad():
__magic_name__ : List[str] = model(**_snake_case ).logits
print(outputs.keys() )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_snake_case )
if push_to_hub:
print(F'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(F'''microsoft/{model_name}''' )
image_processor.push_to_hub(F'''microsoft/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 41 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
snake_case : List[str] = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : int = ["BeitFeatureExtractor"]
snake_case : Optional[int] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41 | 1 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class a__ ( __lowerCAmelCase ):
__lowerCAmelCase = 42
__lowerCAmelCase = jnp.floataa
__lowerCAmelCase = True
def __magic_name__ ( self ):
super().setup()
lowercase : int = nn.Dense(5 , dtype=self.dtype )
def __call__( self , *_a , **_a ):
lowercase : Dict = super().__call__(*_a , **_a )
lowercase : str = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class a__ ( __lowerCAmelCase ):
__lowerCAmelCase = FlaxBigBirdForNaturalQuestionsModule
def __magic_name__ ( __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : int , __snake_case : Union[str, Any] ) -> Optional[Any]:
def cross_entropy(__snake_case : str , __snake_case : str , __snake_case : str=None ):
lowercase : Any = logits.shape[-1]
lowercase : List[Any] = (labels[..., None] == jnp.arange(_SCREAMING_SNAKE_CASE )[None]).astype("f4" )
lowercase : List[str] = jax.nn.log_softmax(_SCREAMING_SNAKE_CASE , axis=-1 )
lowercase : Dict = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
lowercase : str = reduction(_SCREAMING_SNAKE_CASE )
return loss
lowercase : Tuple = partial(_SCREAMING_SNAKE_CASE , reduction=jnp.mean )
lowercase : List[str] = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase : Union[str, Any] = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase : Any = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return (start_loss + end_loss + pooled_loss) / 3
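# Note (added): the objective above averages three cross-entropies: the start
# token, the end token, and the 5-way answer-category head defined on the module
# at the top of this file.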
@dataclass
class a__ :
__lowerCAmelCase = """google/bigbird-roberta-base"""
__lowerCAmelCase = 3000
    __lowerCAmelCase = 10_500
__lowerCAmelCase = 128
__lowerCAmelCase = 3
__lowerCAmelCase = 1
__lowerCAmelCase = 5
# tx_args
__lowerCAmelCase = 3E-5
__lowerCAmelCase = 0.0
    __lowerCAmelCase = 20_000
__lowerCAmelCase = 0.0095
__lowerCAmelCase = """bigbird-roberta-natural-questions"""
__lowerCAmelCase = """training-expt"""
__lowerCAmelCase = """data/nq-training.jsonl"""
__lowerCAmelCase = """data/nq-validation.jsonl"""
def __magic_name__ ( self ):
os.makedirs(self.base_dir , exist_ok=_a )
lowercase : str = os.path.join(self.base_dir , self.save_dir )
lowercase : List[str] = self.batch_size_per_device * jax.device_count()
@dataclass
class a__ :
__lowerCAmelCase = 42
__lowerCAmelCase = 4096 # no dynamic padding on TPUs
def __call__( self , _a ):
lowercase : int = self.collate_fn(_a )
lowercase : Optional[int] = jax.tree_util.tree_map(_a , _a )
return batch
def __magic_name__ ( self , _a ):
lowercase : Dict = self.fetch_inputs(features["input_ids"] )
lowercase : List[Any] = {
'input_ids': jnp.array(_a , dtype=jnp.intaa ),
'attention_mask': jnp.array(_a , dtype=jnp.intaa ),
'start_labels': jnp.array(features["start_token"] , dtype=jnp.intaa ),
'end_labels': jnp.array(features["end_token"] , dtype=jnp.intaa ),
'pooled_labels': jnp.array(features["category"] , dtype=jnp.intaa ),
}
return batch
def __magic_name__ ( self , _a ):
lowercase : List[Any] = [self._fetch_inputs(_a ) for ids in input_ids]
return zip(*_a )
def __magic_name__ ( self , _a ):
lowercase : Union[str, Any] = [1 for _ in range(len(_a ) )]
while len(_a ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def __magic_name__ ( __snake_case : Any , __snake_case : List[Any] , __snake_case : Tuple=None ) -> List[Any]:
if seed is not None:
lowercase : int = dataset.shuffle(seed=_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) // batch_size ):
lowercase : Union[str, Any] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(_SCREAMING_SNAKE_CASE )
@partial(jax.pmap , axis_name="batch" )
def __magic_name__ ( __snake_case : List[str] , __snake_case : int , **__snake_case : str ) -> Any:
def loss_fn(__snake_case : Any ):
lowercase : str = model_inputs.pop("start_labels" )
lowercase : Dict = model_inputs.pop("end_labels" )
lowercase : Optional[int] = model_inputs.pop("pooled_labels" )
lowercase : Optional[Any] = state.apply_fn(**_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , dropout_rng=_SCREAMING_SNAKE_CASE , train=_SCREAMING_SNAKE_CASE )
lowercase : Optional[int] = outputs
return state.loss_fn(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
lowercase : Union[str, Any] = jax.random.split(_SCREAMING_SNAKE_CASE )
lowercase : List[Any] = jax.value_and_grad(_SCREAMING_SNAKE_CASE )
lowercase : str = grad_fn(state.params )
lowercase : Optional[int] = jax.lax.pmean({"loss": loss} , axis_name="batch" )
lowercase : int = jax.lax.pmean(_SCREAMING_SNAKE_CASE , "batch" )
lowercase : Union[str, Any] = state.apply_gradients(grads=_SCREAMING_SNAKE_CASE )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def __magic_name__ ( __snake_case : Union[str, Any] , **__snake_case : List[str] ) -> Optional[Any]:
lowercase : Optional[int] = model_inputs.pop("start_labels" )
lowercase : int = model_inputs.pop("end_labels" )
lowercase : Dict = model_inputs.pop("pooled_labels" )
lowercase : Union[str, Any] = state.apply_fn(**_SCREAMING_SNAKE_CASE , params=state.params , train=_SCREAMING_SNAKE_CASE )
lowercase : int = outputs
lowercase : Optional[int] = state.loss_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase : Tuple = jax.lax.pmean({"loss": loss} , axis_name="batch" )
return metrics
class a__ ( train_state.TrainState ):
__lowerCAmelCase = struct.field(pytree_node=__lowerCAmelCase )
@dataclass
class a__ :
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = None
def __magic_name__ ( self , _a , _a , _a , _a=None ):
lowercase : Dict = model.params
lowercase : Any = TrainState.create(
apply_fn=model.__call__ , params=_a , tx=_a , loss_fn=_a , )
if ckpt_dir is not None:
lowercase : Any = restore_checkpoint(_a , _a )
lowercase : Any = {
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
lowercase : str = build_tx(**_a )
lowercase : Optional[Any] = train_state.TrainState(
step=_a , apply_fn=model.__call__ , params=_a , tx=_a , opt_state=_a , )
lowercase : int = args
lowercase : Union[str, Any] = data_collator
lowercase : Any = lr
lowercase : Dict = params
lowercase : Tuple = jax_utils.replicate(_a )
return state
def __magic_name__ ( self , _a , _a , _a ):
lowercase : int = self.args
lowercase : str = len(_a ) // args.batch_size
lowercase : Tuple = jax.random.PRNGKey(0 )
lowercase : List[Any] = jax.random.split(_a , jax.device_count() )
for epoch in range(args.max_epochs ):
lowercase : str = jnp.array(0 , dtype=jnp.floataa )
lowercase : Tuple = get_batched_dataset(_a , args.batch_size , seed=_a )
lowercase : Optional[int] = 0
for batch in tqdm(_a , total=_a , desc=f"""Running EPOCH-{epoch}""" ):
lowercase : List[str] = self.data_collator(_a )
lowercase : int = self.train_step_fn(_a , _a , **_a )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
if i % args.logging_steps == 0:
lowercase : List[Any] = jax_utils.unreplicate(state.step )
lowercase : Tuple = running_loss.item() / i
lowercase : Optional[Any] = self.scheduler_fn(state_step - 1 )
lowercase : List[Any] = self.evaluate(_a , _a )
lowercase : List[str] = {
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(_a ) )
self.logger.log(_a , commit=_a )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f"""-e{epoch}-s{i}""" , state=_a )
def __magic_name__ ( self , _a , _a ):
lowercase : Tuple = get_batched_dataset(_a , self.args.batch_size )
lowercase : Dict = len(_a ) // self.args.batch_size
lowercase : Tuple = jnp.array(0 , dtype=jnp.floataa )
lowercase : List[Any] = 0
for batch in tqdm(_a , total=_a , desc="Evaluating ... " ):
lowercase : str = self.data_collator(_a )
lowercase : List[str] = self.val_step_fn(_a , **_a )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
return running_loss / i
def __magic_name__ ( self , _a , _a ):
lowercase : List[Any] = jax_utils.unreplicate(_a )
print(f"""SAVING CHECKPOINT IN {save_dir}""" , end=" ... " )
self.model_save_fn(_a , params=state.params )
with open(os.path.join(_a , "opt_state.msgpack" ) , "wb" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(_a , "args.joblib" ) )
joblib.dump(self.data_collator , os.path.join(_a , "data_collator.joblib" ) )
with open(os.path.join(_a , "training_state.json" ) , "w" ) as f:
json.dump({"step": state.step.item()} , _a )
print("DONE" )
def __magic_name__ ( __snake_case : List[Any] , __snake_case : List[Any] ) -> Optional[Any]:
print(f"""RESTORING CHECKPOINT FROM {save_dir}""" , end=" ... " )
with open(os.path.join(_SCREAMING_SNAKE_CASE , "flax_model.msgpack" ) , "rb" ) as f:
lowercase : int = from_bytes(state.params , f.read() )
with open(os.path.join(_SCREAMING_SNAKE_CASE , "opt_state.msgpack" ) , "rb" ) as f:
lowercase : Optional[Any] = from_bytes(state.opt_state , f.read() )
lowercase : Optional[Any] = joblib.load(os.path.join(_SCREAMING_SNAKE_CASE , "args.joblib" ) )
lowercase : int = joblib.load(os.path.join(_SCREAMING_SNAKE_CASE , "data_collator.joblib" ) )
with open(os.path.join(_SCREAMING_SNAKE_CASE , "training_state.json" ) , "r" ) as f:
lowercase : Any = json.load(_SCREAMING_SNAKE_CASE )
lowercase : Optional[Any] = training_state['step']
print("DONE" )
return params, opt_state, step, args, data_collator
def __magic_name__ ( __snake_case : str , __snake_case : str , __snake_case : Any , __snake_case : int ) -> Optional[int]:
lowercase : str = num_train_steps - warmup_steps
lowercase : str = optax.linear_schedule(init_value=_SCREAMING_SNAKE_CASE , end_value=_SCREAMING_SNAKE_CASE , transition_steps=_SCREAMING_SNAKE_CASE )
lowercase : List[Any] = optax.linear_schedule(init_value=_SCREAMING_SNAKE_CASE , end_value=1E-7 , transition_steps=_SCREAMING_SNAKE_CASE )
lowercase : int = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
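# Illustrative sketch (added): a self-contained miniature of the schedule above
# with made-up step counts, showing the linear warmup joined to a linear decay.
def _demo_schedule():
    warmup = optax.linear_schedule(init_value=0.0 , end_value=3e-5 , transition_steps=100 )
    decay = optax.linear_schedule(init_value=3e-5 , end_value=1e-7 , transition_steps=900 )
    schedule = optax.join_schedules(schedules=[warmup, decay] , boundaries=[100] )
    assert float(schedule(0 ) ) == 0.0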
def __magic_name__ ( __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : List[Any] ) -> Tuple:
def weight_decay_mask(__snake_case : int ):
lowercase : List[Any] = traverse_util.flatten_dict(_SCREAMING_SNAKE_CASE )
lowercase : List[str] = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
return traverse_util.unflatten_dict(_SCREAMING_SNAKE_CASE )
lowercase : List[str] = scheduler_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase : Any = optax.adamw(learning_rate=_SCREAMING_SNAKE_CASE , weight_decay=_SCREAMING_SNAKE_CASE , mask=_SCREAMING_SNAKE_CASE )
return tx, lr
| 202 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __snake_case ( __lowerCAmelCase ):
a__ = """decision_transformer"""
a__ = ["""past_key_values"""]
a__ = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , state_dim=17 , act_dim=4 , hidden_size=128 , max_ep_len=4_096 , action_tanh=True , vocab_size=1 , n_positions=1_024 , n_layer=3 , n_head=1 , n_inner=None , activation_function="relu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ) -> None:
'''simple docstring'''
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
| 290 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = 0
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ = Path(__a ) / 'preprocessor_config.json'
UpperCAmelCase__ = Path(__a ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(__a , 'w' ) , )
json.dump({'model_type': 'clip'} , open(__a , 'w' ) )
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ = Path(__a ) / 'preprocessor_config.json'
UpperCAmelCase__ = Path(__a ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(__a , 'w' ) , )
json.dump({'model_type': 'clip'} , open(__a , 'w' ) )
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ = CLIPConfig()
            # Create a dummy config file with image_processor_type
UpperCAmelCase__ = Path(__a ) / 'preprocessor_config.json'
UpperCAmelCase__ = Path(__a ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(__a , 'w' ) , )
json.dump({'model_type': 'clip'} , open(__a , 'w' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(__a ).to_dict()
config_dict.pop('image_processor_type' )
UpperCAmelCase__ = CLIPImageProcessor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
UpperCAmelCase__ = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ = Path(__a ) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(__a , 'w' ) , )
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
__a , 'clip-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase__ = AutoImageProcessor.from_pretrained('clip-base' )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
__a , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(__a , revision='aaaaaa' )
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
__a , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase__ = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
with self.assertRaises(__a ):
UpperCAmelCase__ = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=__a )
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register('custom' , __a )
AutoImageProcessor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoImageProcessor.register(__a , __a )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ = Path(__a ) / 'preprocessor_config.json'
UpperCAmelCase__ = Path(__a ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(__a , 'w' ) , )
json.dump({'model_type': 'clip'} , open(__a , 'w' ) )
UpperCAmelCase__ = CustomImageProcessor.from_pretrained(__a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = True
try:
AutoConfig.register('custom' , __a )
AutoImageProcessor.register(__a , __a )
# If remote code is not set, the default is to use local
UpperCAmelCase__ = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(not hasattr(__a , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 335 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 335 | 1 |
import math
import unittest
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
    assert isinstance(lowerCAmelCase , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
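# Worked note (added): writing n = 6k + r, the residues r in {0, 2, 3, 4} are
# divisible by 2 or 3, so every prime greater than 3 leaves r in {1, 5}; that is
# why the loop above steps by 6 and only tests i and i + 2.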
class Test(unittest.TestCase):
    def test_primes(self):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0), "Zero doesn't have any positive factors, primes must have exactly two."
        )
        self.assertFalse(
            is_prime(1), "One only has 1 positive factor, primes must have exactly two."
        )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 18 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_chinese_clip'''] = ['''ChineseCLIPFeatureExtractor''']
    _import_structure['''image_processing_chinese_clip'''] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_chinese_clip'''] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
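
# Editorial note: `_import_structure` maps submodule names to their public
# symbols; `_LazyModule` installs itself in `sys.modules` and performs the real
# imports only on first attribute access, keeping the top-level import cheap.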
| 18 | 1 |
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5) -> list:
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
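
# Illustrative (editorial): fill_mask returns (filled_sentence, probability, token)
# triples, one per top-k candidate; the exact tokens and scores printed below
# depend on the checkpoint.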
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 338 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
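
# Worked example (editorial): with precision = 10 and array [1, 3, 5, 7, 9, 11],
# both helpers immediately fall through to lin_search and return index 3 for
# target 7:
#   ite_ternary_search([1, 3, 5, 7, 9, 11], 7) == 3
#   rec_ternary_search(0, 5, [1, 3, 5, 7, 9, 11], 7) == 3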
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Any = input("""Enter numbers separated by comma:\n""").strip()
a : Any = [int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
a : Tuple = int(input("""Enter the number to be found in the list:\n""").strip())
a : Union[str, Any] = ite_ternary_search(collection, target)
a : Optional[Any] = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F'''Iterative search: {target} found at positions: {resulta}''')
print(F'''Recursive search: {target} found at positions: {resulta}''')
else:
print("""Not found""")
| 338 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_swiftformer'''] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_lilt'''] = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 1 |
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : Dict = None
if self.use_input_mask:
lowerCAmelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ : str = None
lowerCAmelCase_ : Optional[Any] = None
lowerCAmelCase_ : Union[str, Any] = None
if self.use_labels:
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : str ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict ):
lowerCAmelCase_ : Dict = BioGptModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Any = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , ):
lowerCAmelCase_ : Optional[int] = BioGptForCausalLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase_ : Union[str, Any] = BioGptModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
# create attention mask
lowerCAmelCase_ : Union[str, Any] = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = self.seq_length // 2
lowerCAmelCase_ : Optional[Any] = 0
# first forward pass
lowerCAmelCase_ ,lowerCAmelCase_ : Optional[int] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase_ : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
lowerCAmelCase_ : Optional[Any] = ids_tensor((1,) , SCREAMING_SNAKE_CASE_ ).item() + 1
lowerCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
lowerCAmelCase_ : List[Any] = random_other_next_tokens
# append to next input_ids and attn_mask
lowerCAmelCase_ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase_ : Dict = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )] , dim=1 , )
# get two different outputs
lowerCAmelCase_ : int = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )['last_hidden_state']
lowerCAmelCase_ : int = model(SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )['last_hidden_state']
# select random slice
lowerCAmelCase_ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase_ : Dict = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCAmelCase_ : List[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Dict , *SCREAMING_SNAKE_CASE_ : str ):
lowerCAmelCase_ : Dict = BioGptModel(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).eval()
lowerCAmelCase_ : str = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
# first forward pass
lowerCAmelCase_ : int = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ ,lowerCAmelCase_ : int = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase_ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ : Dict = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
lowerCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase_ : Dict = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCAmelCase_ : Dict = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )['last_hidden_state']
lowerCAmelCase_ : Optional[int] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )[
'last_hidden_state'
]
# select random slice
lowerCAmelCase_ : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase_ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase_ : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict , *SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str]=False ):
lowerCAmelCase_ : Optional[Any] = BioGptForCausalLM(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
lowerCAmelCase_ : Optional[Any] = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
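        # Worked number (editorial): with initializer_range = 0.02 and 5 hidden
        # layers, model_std = 0.02 / math.sqrt(10) ≈ 0.0063; the checks below
        # compare it against the empirical std of every "c_proj" weight.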
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , *SCREAMING_SNAKE_CASE_ : List[Any] ):
lowerCAmelCase_ : Tuple = self.num_labels
lowerCAmelCase_ : List[str] = BioGptForTokenClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,
) : List[str] = config_and_inputs
lowerCAmelCase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
def SCREAMING_SNAKE_CASE__ ( self : str ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ : int = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*SCREAMING_SNAKE_CASE_ , gradient_checkpointing=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')
        tokenizer.padding_side = 'left'
        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
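        # Editorial note: left padding is required for decoder-only models such
        # as BioGPT, so that generation continues from a real token at the end
        # of every row instead of padding.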
# use different length sentences to test batching
lowerCAmelCase_ : Optional[int] = [
'Hello, my dog is a little',
'Today, I',
]
lowerCAmelCase_ : int = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='pt' , padding=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = inputs['input_ids'].to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = model.generate(
input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=inputs['attention_mask'].to(SCREAMING_SNAKE_CASE_ ) , )
lowerCAmelCase_ : List[str] = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = model.generate(input_ids=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
lowerCAmelCase_ : List[str] = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_length=model.config.max_length - num_paddings )
lowerCAmelCase_ : Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : List[str] = BioGptModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ ,lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : str = 3
lowerCAmelCase_ : Dict = input_dict['input_ids']
lowerCAmelCase_ : List[Any] = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase_ : List[str] = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Any = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ ,lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : str = 3
lowerCAmelCase_ : Tuple = 'multi_label_classification'
lowerCAmelCase_ : int = input_dict['input_ids']
lowerCAmelCase_ : List[Any] = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase_ : Union[str, Any] = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : Any = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
lowerCAmelCase_ : Dict = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
lowerCAmelCase_ : Dict = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCAmelCase_ : Union[str, Any] = 4_2_3_8_4
lowerCAmelCase_ : Optional[Any] = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : List[Any] = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
lowerCAmelCase_ : Optional[int] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(SCREAMING_SNAKE_CASE_ )
torch.manual_seed(0 )
lowerCAmelCase_ : Dict = tokenizer('COVID-19 is' , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = model.generate(
**SCREAMING_SNAKE_CASE_ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : Optional[int] = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 289 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : Dict = None
if self.use_input_lengths:
lowerCAmelCase_ : Any = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase_ : Any = None
if self.use_token_type_ids:
lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase_ : Dict = None
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : List[str] = None
if self.use_labels:
lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : Any = ids_tensor([self.batch_size] , 2 ).float()
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ : Union[str, Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , ):
lowerCAmelCase_ : Union[str, Any] = FlaubertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Any = model(SCREAMING_SNAKE_CASE_ , lengths=SCREAMING_SNAKE_CASE_ , langs=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = model(SCREAMING_SNAKE_CASE_ , langs=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , ):
lowerCAmelCase_ : Any = FlaubertWithLMHeadModel(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Dict = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , ):
lowerCAmelCase_ : Tuple = FlaubertForQuestionAnsweringSimple(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = model(SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
lowerCAmelCase_ : Optional[int] = FlaubertForQuestionAnswering(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : int = model(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = model(
SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , cls_index=SCREAMING_SNAKE_CASE_ , is_impossible=SCREAMING_SNAKE_CASE_ , p_mask=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : Optional[int] = model(
SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , cls_index=SCREAMING_SNAKE_CASE_ , is_impossible=SCREAMING_SNAKE_CASE_ , )
((lowerCAmelCase_) ,) : int = result_with_labels.to_tuple()
lowerCAmelCase_ : int = model(SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
((lowerCAmelCase_) ,) : Dict = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , ):
lowerCAmelCase_ : Optional[int] = FlaubertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Any = model(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , ):
lowerCAmelCase_ : List[Any] = self.num_labels
lowerCAmelCase_ : Optional[Any] = FlaubertForTokenClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : int = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , ):
lowerCAmelCase_ : Dict = self.num_choices
lowerCAmelCase_ : Optional[Any] = FlaubertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : Optional[Any] = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowerCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,
) : Dict = config_and_inputs
lowerCAmelCase_ : List[str] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
def SCREAMING_SNAKE_CASE__ ( self : Any ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*SCREAMING_SNAKE_CASE_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Optional[Any] = FlaubertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ ,lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCAmelCase_ : int = True
lowerCAmelCase_ : Union[str, Any] = model_class(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = torch.jit.trace(
SCREAMING_SNAKE_CASE_ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , 'traced_model.pt' ) )
lowerCAmelCase_ : Optional[Any] = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , 'traced_model.pt' ) , map_location=SCREAMING_SNAKE_CASE_ )
loaded(inputs_dict['input_ids'].to(SCREAMING_SNAKE_CASE_ ) , inputs_dict['attention_mask'].to(SCREAMING_SNAKE_CASE_ ) )
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : int = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' )
lowerCAmelCase_ : List[Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
with torch.no_grad():
lowerCAmelCase_ : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCAmelCase_ : Optional[Any] = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 289 | 1 |
"""simple docstring"""
__magic_name__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
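
# Editorial note: the try/except pattern above repeats for every optional
# backend below; when a dependency is missing, the matching dummy-objects
# module is star-imported so the names still exist but raise an informative
# error message when used.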
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 100 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__magic_name__ = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use YolosImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
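

# Editorial note: constructing YolosFeatureExtractor still works but emits the
# FutureWarning above; new code should instantiate YolosImageProcessor directly.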
| 100 | 1 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file ( ):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def get_results ( output_dir ):
    """simple docstring"""
    results = {}
    path = os.path.join(output_dir , '''all_results.json''' )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            results = json.load(f )
    else:
        raise ValueError(F'''can\'t find {path}''' )
    return results
def is_cuda_and_apex_available ( ):
    """simple docstring"""
    is_using_cuda = torch.cuda.is_available() and torch_device == '''cuda'''
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _snake_case ( TestCasePlus ):
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
a :int = tempfile.mkdtemp()
a :Optional[Any] = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
a :Optional[int] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[Any] = self.get_auto_remove_tmp_dir()
a :Union[str, Any] = F'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
a :Optional[Any] = get_results(_lowerCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = self.get_auto_remove_tmp_dir()
a :Dict = F'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
a :str = get_results(_lowerCamelCase )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[Any] = self.get_auto_remove_tmp_dir()
a :int = F'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
a :str = get_results(_lowerCamelCase )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE__ ( self ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
a :Dict = 7 if get_gpu_count() > 1 else 2
a :Any = self.get_auto_remove_tmp_dir()
a :Dict = F'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
a :Tuple = get_results(_lowerCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Union[str, Any] = self.get_auto_remove_tmp_dir()
a :List[Any] = F'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
a :List[Any] = get_results(_lowerCamelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[Any] = self.get_auto_remove_tmp_dir()
a :Union[str, Any] = F'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
a :List[str] = get_results(_lowerCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = self.get_auto_remove_tmp_dir()
a :Any = F'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
a :Tuple = get_results(_lowerCamelCase )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Union[str, Any] = self.get_auto_remove_tmp_dir()
a :Optional[int] = F'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
a :Optional[Any] = get_results(_lowerCamelCase )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''translation_no_trainer''' ) ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = logging.StreamHandler(sys.stdout )
logger.addHandler(_lowerCamelCase )
a :Dict = self.get_auto_remove_tmp_dir()
a :Dict = F'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
a :Optional[int] = get_results(_lowerCamelCase )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Union[str, Any] = self.get_auto_remove_tmp_dir()
a :Any = F'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
a :Optional[int] = get_results(_lowerCamelCase )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''image_classification_no_trainer''' ) ) )
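# Each test above follows the same recipe: build an `accelerate launch` argv,
# run it with run_command, then read the example script's all_results.json via
# get_results. A compact sketch of that loop outside unittest (the script path
# and metric key are placeholders):
import subprocess

def _run_example_and_check(script , tmp_dir ):
    argv = [sys.executable , script , "--output_dir" , tmp_dir , "--max_train_steps" , "2"]
    subprocess.run(argv , check=True )  # the tests use run_command(self._launch_args + testargs)
    results = get_results(tmp_dir )
    assert results["eval_accuracy"] >= 0.0  # each test asserts a task-specific threshold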
| 281 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : Optional[Any] = logging.get_logger(__name__)
snake_case : Dict = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = 'data2vec-vision'
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 1e-4
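# A short usage sketch, assuming the config above is the one `transformers`
# exports as Data2VecVisionConfig (consistent with the "data2vec-vision"
# model_type and the archive map above):
from transformers import Data2VecVisionConfig

_config = Data2VecVisionConfig(image_size=384 , out_indices=[3, 5, 7, 11] )
assert _config.model_type == "data2vec-vision"
assert _config.hidden_size == 768  # default kept from the signature above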
| 281 | 1 |
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings ( timesteps : jnp.ndarray , embedding_dim : int , freq_shift : float = 1 , min_timescale : float = 1 , max_timescale : float = 1.0E4 , flip_sin_to_cos : bool = False , scale : float = 1.0 , ) -> jnp.ndarray:
    '''simple docstring'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even"""
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales , dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps , 1 ) * jnp.expand_dims(inv_timescales , 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )] , axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )] , axis=1 )
    signal = jnp.reshape(signal , [jnp.shape(timesteps )[0], embedding_dim] )
    return signal
class __A ( nn.Module ):
    time_embed_dim : int = 32
    dtype : jnp.dtype = jnp.float32
@nn.compact
    def __call__(self , temb ):
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(temb )
        temb = nn.silu(temb )
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(temb )
        return temb
class __A ( nn.Module ):
    dim : int = 32
    flip_sin_to_cos : bool = False
    freq_shift : float = 1
@nn.compact
def __call__(self : Optional[Any] , __a : List[Any] ):
return get_sinusoidal_embeddings(
__a , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
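# A quick numerical check of get_sinusoidal_embeddings (purely a sketch, run at
# import time): timestep 0 maps to sin(0)=0 in the first half and cos(0)=1 in
# the second, and with the default scale=1 every entry is bounded by 1.
_timesteps = jnp.array([0.0, 1.0, 10.0, 100.0] )
_emb = get_sinusoidal_embeddings(_timesteps , 8 )
assert _emb.shape == (4, 8)
assert bool(jnp.all(jnp.abs(_emb ) <= 1.0 + 1e-6 ) )
assert bool(jnp.allclose(_emb[0, :4] , 0.0 , atol=1e-6 ) )
assert bool(jnp.allclose(_emb[0, 4:] , 1.0 , atol=1e-6 ) )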
 | 1 |
'''simple docstring'''
def greatest_common_divisor ( x : int , y : int ) -> int:
    '''simple docstring'''
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm ( x : int , y : int ) -> int:
    '''simple docstring'''
    return (x * y) // greatest_common_divisor(x , y )
def solution ( n : int = 20 ) -> int:
    '''simple docstring'''
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(f"{solution() = }")
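    # Worked example: the smallest number evenly divisible by all of 1..10 is
    # 2520, and lcm(a, b) * greatest_common_divisor(a, b) == a * b:
    assert solution(10) == 2520
    assert lcm(4 , 6 ) == 12
    assert lcm(4 , 6 ) * greatest_common_divisor(4 , 6 ) == 4 * 6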
| 1 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : List[str] = """vit_mae"""
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=1_6 , decoder_hidden_size=5_1_2 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2_0_4_8 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , )-> int:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
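# With the defaults above, the MAE encoder sees only a quarter of the image:
# a 224x224 input with 16x16 patches yields (224 // 16) ** 2 == 196 patches,
# and mask_ratio=0.75 hides int(196 * 0.75) == 147 of them, leaving 49 visible.
# A small bookkeeping sketch (not taken from the modeling code):
def visible_patch_count(image_size=224 , patch_size=16 , mask_ratio=0.75 ):
    num_patches = (image_size // patch_size) ** 2
    return num_patches - int(num_patches * mask_ratio )

assert visible_patch_count() == 49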
| 108 |
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
_DESCRIPTION = "\\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
_KWARGS_DESCRIPTION = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
    def _info ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )
    def _download_and_prepare ( self , dl_manager ):
        '''simple docstring'''
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').")
            self.config_name = "bleurt-base-128"
        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}")
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name))
    def _compute ( self , predictions , references ):
        '''simple docstring'''
        scores = self.scorer.score(references=references , candidates=predictions)
return {"scores": scores}
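# A usage sketch matching the docstring above (loading the metric downloads the
# selected BLEURT checkpoint, so this needs network access and the bleurt
# package):
# >>> bleurt = datasets.load_metric("bleurt", "bleurt-tiny-128")
# >>> bleurt.compute(predictions=["hello there"], references=["hello there"])["scores"]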
| 108 | 1 |
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Optional[int] = 0
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(SCREAMING_SNAKE_CASE ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
snake_case : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
            self.assertIsInstance(SCREAMING_SNAKE_CASE , (GPT2Tokenizer, GPT2TokenizerFast) )
self.assertGreater(len(SCREAMING_SNAKE_CASE ) , 0 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[Any] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : int = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Check that tokenizer_type ≠ model_type
snake_case : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(SCREAMING_SNAKE_CASE , "vocab.txt" ) )
snake_case : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , tokenizer_type="bert" , use_fast=SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(SCREAMING_SNAKE_CASE , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(SCREAMING_SNAKE_CASE , "merges.txt" ) )
snake_case : Tuple = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , tokenizer_type="gpt2" , use_fast=SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@require_tokenizers
def lowerCamelCase_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(SCREAMING_SNAKE_CASE , "vocab.txt" ) )
snake_case : Optional[Any] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , tokenizer_type="bert" )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(SCREAMING_SNAKE_CASE , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(SCREAMING_SNAKE_CASE , "merges.txt" ) )
snake_case : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , tokenizer_type="gpt2" )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with pytest.raises(SCREAMING_SNAKE_CASE ):
AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" )
@require_tokenizers
def lowerCamelCase_ ( self ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
snake_case : Tuple = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
self.assertIsInstance(SCREAMING_SNAKE_CASE , (BertTokenizer, BertTokenizerFast) )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , SCREAMING_SNAKE_CASE )
else:
self.assertEqual(tokenizer.do_lower_case , SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowerCamelCase_ ( self ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
snake_case : List[str] = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Any = TOKENIZER_MAPPING.values()
snake_case : List[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(SCREAMING_SNAKE_CASE )
@require_tokenizers
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , SCREAMING_SNAKE_CASE )
@require_tokenizers
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : int = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=SCREAMING_SNAKE_CASE )
snake_case : int = "Hello, world. How are you?"
snake_case : Optional[int] = tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertEqual("[UNK]" , tokens[0] )
snake_case : List[str] = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=SCREAMING_SNAKE_CASE )
snake_case : Tuple = tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertEqual("[UNK]" , tokens[0] )
@require_tokenizers
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Optional[int] = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
self.assertEqual(type(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30_000 )
self.assertEqual(tokenizer.unk_token , "[UNK]" )
self.assertEqual(tokenizer.padding_side , "right" )
self.assertEqual(tokenizer.truncation_side , "right" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : int = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
snake_case : int = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[str] = AutoTokenizer.from_pretrained("ctrl" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Dict = get_tokenizer_config("bert-base-cased" )
snake_case : str = config.pop("_commit_hash" , SCREAMING_SNAKE_CASE )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(SCREAMING_SNAKE_CASE , {"do_lower_case": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
snake_case : str = get_tokenizer_config(SCREAMING_SNAKE_CASE )
self.assertDictEqual(SCREAMING_SNAKE_CASE , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
snake_case : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
snake_case : Tuple = get_tokenizer_config(SCREAMING_SNAKE_CASE )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] , "BertTokenizer" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
try:
AutoConfig.register("custom" , SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
snake_case : List[Any] = CustomTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
snake_case : Any = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCamelCase_ ( self ):
"""simple docstring"""
try:
AutoConfig.register("custom" , SCREAMING_SNAKE_CASE )
# Can register in two steps
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , fast_tokenizer_class=SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE , fast_tokenizer_class=SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(SCREAMING_SNAKE_CASE , fast_tokenizer_class=SCREAMING_SNAKE_CASE )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case : str = BertTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE )
bert_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
snake_case : Union[str, Any] = CustomTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
snake_case : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self ):
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE ):
snake_case : Optional[int] = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE ):
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=SCREAMING_SNAKE_CASE )
snake_case : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
snake_case : Optional[Any] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
snake_case : Dict = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
snake_case : List[Any] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
@require_tokenizers
def lowerCamelCase_ ( self ):
"""simple docstring"""
class lowerCamelCase__ ( lowerCamelCase_ ):
a__ : Any = False
class lowerCamelCase__ ( lowerCamelCase_ ):
a__ : Optional[int] = NewTokenizer
a__ : Optional[int] = False
try:
AutoConfig.register("custom" , SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , fast_tokenizer_class=SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
snake_case : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
snake_case : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
snake_case : Optional[Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
snake_case : Dict = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
snake_case : Any = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertTrue(tokenizer.special_attribute_present )
snake_case : Optional[Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Optional[int] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
snake_case : List[str] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , "bert-base is not a local folder and is not a valid model identifier" ):
snake_case : List[Any] = AutoTokenizer.from_pretrained("bert-base" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
snake_case : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , revision="aaaaaa" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
snake_case : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
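# The core round trip these tests keep exercising, as a standalone sketch
# (uses a tiny public test checkpoint; needs network access on first run):
def _tokenizer_roundtrip(model_id="hf-internal-testing/tiny-random-bert" ):
    tok = AutoTokenizer.from_pretrained(model_id )
    with tempfile.TemporaryDirectory() as tmp_dir:
        tok.save_pretrained(tmp_dir )  # writes tokenizer_config.json plus the vocab files
        reloaded = AutoTokenizer.from_pretrained(tmp_dir )
    assert type(reloaded ) is type(tok )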
| 148 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common ( self ):
        """simple docstring"""
        config , input_ids , token_type_ids , attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( FlaxModelTesterMixin , unittest.TestCase ):
a__ : Optional[Any] = True
a__ : List[str] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp ( self ):
        """simple docstring"""
        self.model_tester = FlaxRoFormerModelTester(self )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case : List[Any] = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=SCREAMING_SNAKE_CASE )
snake_case : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[str] = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
snake_case : Union[str, Any] = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case : List[Any] = model(SCREAMING_SNAKE_CASE )[0]
snake_case : List[Any] = 50_000
snake_case : List[str] = (1, 6, vocab_size)
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
snake_case : Optional[int] = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 148 | 1 |
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    """simple docstring"""
    def __init__( self , key = 0 ):
        # private field; falls back to 0 when no key is given
        self.__key = key
    def encrypt( self , content , key ):
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def decrypt( self , content , key ):
        assert isinstance(key , int ) and isinstance(content , list )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def encrypt_string( self , content , key = 0 ):
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ''''''
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def decrypt_string( self , content , key = 0 ):
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ''''''
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def encrypt_file( self , file , key = 0 ):
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open('''encrypt.out''' , '''w+''' ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False
        return True
    def decrypt_file( self , file , key ):
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open('''decrypt.out''' , '''w+''' ) as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False
        return True
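# A usage sketch of the class above: XOR is an involution, so decrypting an
# encrypted string with the same key recovers the plaintext.
_cipher = XORCipher(key=67 )
assert _cipher.decrypt_string(_cipher.encrypt_string("hallo welt" , 67 ) , 67 ) == "hallo welt"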
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
 | 215 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_layoutxlm"""] = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_layoutxlm_fast"""] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
 | 215 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( TokenizerTesterMixin ,unittest.TestCase ):
__lowerCamelCase : Tuple = OpenAIGPTTokenizer
__lowerCamelCase : Optional[Any] = OpenAIGPTTokenizerFast
__lowerCamelCase : List[str] = True
__lowerCamelCase : List[Any] = False
def _snake_case ( self ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(merges ) )
def _snake_case ( self , _lowerCAmelCase ) -> Dict:
return "lower newer", "lower newer"
def _snake_case ( self ) -> Any:
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def _snake_case ( self , _lowerCAmelCase=15 ) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
# Simple input
_lowerCAmelCase = "This is a simple input"
_lowerCAmelCase = ["This is a simple input 1", "This is a simple input 2"]
_lowerCAmelCase = ("This is a simple input", "This is a pair")
_lowerCAmelCase = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" , )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" , )
def _snake_case ( self ) -> Dict:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class lowerCAmelCase_ ( __magic_name__ ):
pass
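# A worked example of the toy BPE vocabulary above: applying the merges
# "l o", "lo w", "e r</w>" in order segments "lower" into ["low", "er</w>"],
# which is exactly what the full-tokenizer test asserts (ids 14 and 15). The
# helper below is a simplified single-pass sketch, not the real BPE algorithm:
def _apply_merges(word , merges ):
    symbols = list(word[:-1] ) + [word[-1] + "</w>"]
    for first , second in (m.split() for m in merges if m and not m.startswith("#" )):
        merged = []
        i = 0
        while i < len(symbols ):
            if i < len(symbols ) - 1 and symbols[i] == first and symbols[i + 1] == second:
                merged.append(first + second )
                i += 2
            else:
                merged.append(symbols[i] )
                i += 1
        symbols = merged
    return symbols

assert _apply_merges("lower" , ["#version: 0.2", "l o", "lo w", "e r</w>", ""] ) == ["low", "er</w>"]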
| 158 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
_SCREAMING_SNAKE_CASE = datasets.utils.logging.get_logger(__name__)
class lowerCAmelCase_ ( folder_based_builder.FolderBasedBuilderConfig ):
__lowerCamelCase : bool = None
__lowerCamelCase : bool = None
class lowerCAmelCase_ ( folder_based_builder.FolderBasedBuilder ):
__lowerCamelCase : Tuple = datasets.Audio()
__lowerCamelCase : List[str] = "audio"
__lowerCamelCase : Optional[int] = AudioFolderConfig
__lowerCamelCase : List[str] # definition at the bottom of the script
__lowerCamelCase : Optional[int] = AudioClassification(audio_column="audio" ,label_column="label" )
_SCREAMING_SNAKE_CASE = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
_SCREAMING_SNAKE_CASE = AUDIO_EXTENSIONS
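# A usage sketch of this builder: `datasets` exposes it under the name
# "audiofolder" and infers labels from sub-directory names (the data_dir path
# below is a placeholder):
# from datasets import load_dataset
# dataset = load_dataset("audiofolder", data_dir="/path/to/audio/folder")
# dataset["train"][0]["audio"]  # -> {"path": ..., "array": ..., "sampling_rate": ...}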
| 158 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE :List[str] = logging.get_logger('''transformers.models.speecht5''')
MAPPING_SPEECH_ENCODER_PRENET = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
MAPPING_TEXT_ENCODER_PRENET = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
MAPPING_SPEECH_DECODER_PRENET = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
MAPPING_TEXT_DECODER_PRENET = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
MAPPING_TEXT_DECODER_POSTNET = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
MAPPING_ENCODER = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
MAPPING_DECODER = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk down the attribute path and copy the fairseq tensor into the HF parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")
    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
args = parser.parse_args()
convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 156 |
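# A hedged invocation sketch for the conversion script above; only the flags its
# argparse parser defines are used, and every file path below is a placeholder.
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf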
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 156 | 1 |
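# A minimal usage sketch for the fast BERT tokenizer above, assuming access to the
# "bert-base-uncased" checkpoint listed in the pretrained maps.
from transformers import BertTokenizerFast
tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
print(tok("Hello world!")["input_ids"])  # expected: [101, 7592, 2088, 999, 102]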
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """Adapter that can restrict log records to the main process, or emit them in process order."""
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name, log_level=None):
    """Returns a `MultiProcessAdapter` for `name`, honoring `ACCELERATE_LOG_LEVEL` when `log_level` is unset."""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
| 337 |
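# Sketch of the intended usage of get_logger above; it requires an initialized
# Accelerator (or PartialState), and "INFO" here is just an example level.
from accelerate import Accelerator
accelerator = Accelerator()  # initializes the PartialState the adapter checks
logger = get_logger(__name__, log_level="INFO")
logger.info("printed once, on the main process only")
logger.info("printed by every process, in order", main_process_only=False, in_order=True)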
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"
    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 337 | 1 |
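# A hedged sketch of producing dummy ONNX inputs with the config above; the
# checkpoint name comes from the archive map in the snippet, and the batch and
# sequence sizes are arbitrary examples.
from transformers import PerceiverTokenizer
config = PerceiverConfig()
onnx_config = PerceiverOnnxConfig(config)
tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.NUMPY)
print(sorted(dummy.keys()))  # expected: ['attention_mask', 'inputs']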
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def __a ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
if isinstance(UpperCAmelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class __UpperCAmelCase :
'''simple docstring'''
def A (self : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] ):
pass
def A (self : List[str] ):
pass
def A (self : Union[str, Any] ):
pass
def A (self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ):
A = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def A (self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A (self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str=None , **_lowerCAmelCase : List[Any] ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = {"""vision_model""": vision_model, """text_model""": text_model}
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A (self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : Any ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
A = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
A = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
A = after_output[0].numpy()
A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
def A (self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Any=None , **_lowerCAmelCase : List[Any] ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
A = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
A = to_atuple(vision_model.config.image_size )
A = to_atuple(vision_model.config.patch_size )
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A (self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ):
A = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F"""Difference between torch and flax is {diff} (>= {tol}).""" )
def A (self : List[str] ):
A = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def A (self : Optional[int] ):
A = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def A (self : List[Any] ):
A = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def A (self : int ):
A = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def A (self : int ):
A = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def A (self : Tuple ):
A , A = self.get_pretrained_model_and_inputs()
A = model_a(**_lowerCAmelCase )
A = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
A = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
A = model_a(**_lowerCAmelCase )
A = after_outputs[0].numpy()
A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : int ):
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : int ):
A = TFViTModel(_lowerCAmelCase , name="""vision_model""" )
A = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : Union[str, Any] ):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[int] ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any]=None , **_lowerCAmelCase : Any ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
A = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A = to_atuple(vision_model.config.image_size )
A = to_atuple(vision_model.config.patch_size )
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A (self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : str ):
A = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
A = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : str ):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : Dict ):
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
A = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" )
A = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : Optional[Any] ):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A (self : Any ):
A = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
A = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )
A = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
A = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
| 337 |
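# These TF integration tests would normally be collected by pytest; a hedged example
# invocation (the file path is a guess based on the upstream transformers layout):
#
#   python -m pytest tests/models/vision_text_dual_encoder/test_modeling_tf_vision_text_dual_encoder.py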
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"
    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 337 | 1 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def UpperCAmelCase_ ( self ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase__ : str = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowerCAmelCase__ : Tuple = dict(zip(__UpperCAmelCase ,range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCAmelCase__ : Union[str, Any] = {"""unk_token""": """<unk>"""}
lowerCAmelCase__ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase__ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__UpperCAmelCase ) )
def UpperCAmelCase_ ( self ,**__UpperCAmelCase ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**__UpperCAmelCase )
def UpperCAmelCase_ ( self ,**__UpperCAmelCase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
lowerCAmelCase__ : Union[str, Any] = """lower newer"""
lowerCAmelCase__ : Union[str, Any] = """lower newer"""
return input_text, output_text
def UpperCAmelCase_ ( self ) -> Any:
lowerCAmelCase__ : Optional[Any] = self.tokenizer_class(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
lowerCAmelCase__ : Optional[Any] = """lower newer"""
lowerCAmelCase__ : List[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
lowerCAmelCase__ : Any = tokenizer.tokenize(__UpperCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase )
lowerCAmelCase__ : List[str] = tokens + [tokenizer.unk_token]
lowerCAmelCase__ : int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
lowerCAmelCase__ : Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" ,add_special_tokens=__UpperCAmelCase ) ,[0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" ,add_special_tokens=__UpperCAmelCase ) ,[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] ,)
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : Optional[Any] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
lowerCAmelCase__ : List[str] = tokenizer.encode("""sequence builders""" ,add_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ : Any = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ : int = tokenizer.encode(
"""sequence builders""" ,add_special_tokens=__UpperCAmelCase ,add_prefix_space=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = tokenizer.encode(
"""sequence builders""" ,"""multi-sequence build""" ,add_special_tokens=__UpperCAmelCase ,add_prefix_space=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase ,__UpperCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase__ : List[str] = """Encode this sequence."""
lowerCAmelCase__ : Tuple = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
lowerCAmelCase__ : str = tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,add_prefix_space=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__UpperCAmelCase ,__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,add_prefix_space=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__UpperCAmelCase ,__UpperCAmelCase )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
lowerCAmelCase__ : int = tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__UpperCAmelCase ,__UpperCAmelCase )
# Testing spaces after special tokens
lowerCAmelCase__ : Optional[int] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__UpperCAmelCase ,lstrip=__UpperCAmelCase ,rstrip=__UpperCAmelCase )} ) # mask token has a left space
lowerCAmelCase__ : Optional[int] = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = """Encode <mask> sequence"""
lowerCAmelCase__ : int = """Encode <mask>sequence"""
lowerCAmelCase__ : str = tokenizer.encode(__UpperCAmelCase )
lowerCAmelCase__ : int = encoded.index(__UpperCAmelCase )
lowerCAmelCase__ : Any = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__UpperCAmelCase ,__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = tokenizer.encode(__UpperCAmelCase )
lowerCAmelCase__ : str = encoded.index(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__UpperCAmelCase ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
pass
def UpperCAmelCase_ ( self ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase ,**__UpperCAmelCase )
lowerCAmelCase__ : Dict = self.tokenizer_class.from_pretrained(__UpperCAmelCase ,**__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = """A, <mask> AllenNLP sentence."""
lowerCAmelCase__ : int = tokenizer_r.encode_plus(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = tokenizer_p.encode_plus(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_token_type_ids=__UpperCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
lowerCAmelCase__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
lowerCAmelCase__ : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__UpperCAmelCase ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__UpperCAmelCase ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def UpperCAmelCase_ ( self ) -> int:
for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2 ):
lowerCAmelCase__ : Tuple = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname ,use_fast=__UpperCAmelCase ,add_prefix_space=__UpperCAmelCase ,trim_offsets=__UpperCAmelCase )
lowerCAmelCase__ : int = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCAmelCase__ : List[str] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] ,__UpperCAmelCase )
self.assertEqual(post_processor_state["""add_prefix_space"""] ,__UpperCAmelCase )
self.assertEqual(post_processor_state["""trim_offsets"""] ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ : Dict = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase__ : Any = F"""{text_of_1_token} {text_of_1_token}"""
lowerCAmelCase__ : Tuple = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase ,use_fast=__UpperCAmelCase ,add_prefix_space=__UpperCAmelCase ,trim_offsets=__UpperCAmelCase )
lowerCAmelCase__ : List[str] = tokenizer_r(__UpperCAmelCase ,return_offsets_mapping=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) ,)
lowerCAmelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase ,use_fast=__UpperCAmelCase ,add_prefix_space=__UpperCAmelCase ,trim_offsets=__UpperCAmelCase )
lowerCAmelCase__ : Any = tokenizer_r(__UpperCAmelCase ,return_offsets_mapping=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) ,)
lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase ,use_fast=__UpperCAmelCase ,add_prefix_space=__UpperCAmelCase ,trim_offsets=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = tokenizer_r(__UpperCAmelCase ,return_offsets_mapping=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) ,)
lowerCAmelCase__ : Tuple = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase ,use_fast=__UpperCAmelCase ,add_prefix_space=__UpperCAmelCase ,trim_offsets=__UpperCAmelCase )
lowerCAmelCase__ : Tuple = tokenizer_r(__UpperCAmelCase ,return_offsets_mapping=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) ,)
lowerCAmelCase__ : int = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCAmelCase__ : Tuple = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase ,use_fast=__UpperCAmelCase ,add_prefix_space=__UpperCAmelCase ,trim_offsets=__UpperCAmelCase )
lowerCAmelCase__ : Any = tokenizer_r(__UpperCAmelCase ,return_offsets_mapping=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(__UpperCAmelCase ) + 1, 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) ,)
lowerCAmelCase__ : Any = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase ,use_fast=__UpperCAmelCase ,add_prefix_space=__UpperCAmelCase ,trim_offsets=__UpperCAmelCase )
lowerCAmelCase__ : str = tokenizer_r(__UpperCAmelCase ,return_offsets_mapping=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) ,)
lowerCAmelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase ,use_fast=__UpperCAmelCase ,add_prefix_space=__UpperCAmelCase ,trim_offsets=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = tokenizer_r(__UpperCAmelCase ,return_offsets_mapping=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) ,)
| 37 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : int = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class lowercase__ ( lowercase ):
lowercase__ = """xlm-prophetnet"""
lowercase__ = ["""past_key_values"""]
lowercase__ = {
"""num_attention_heads""": """num_encoder_attention_heads""",
}
def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[float] = 0.1 ,lowerCamelCase__ : Optional[Union[str, Callable]] = "gelu" ,lowerCamelCase__ : Optional[int] = 30522 ,lowerCamelCase__ : Optional[int] = 1024 ,lowerCamelCase__ : Optional[int] = 4096 ,lowerCamelCase__ : Optional[int] = 12 ,lowerCamelCase__ : Optional[int] = 16 ,lowerCamelCase__ : Optional[int] = 4096 ,lowerCamelCase__ : Optional[int] = 12 ,lowerCamelCase__ : Optional[int] = 16 ,lowerCamelCase__ : Optional[float] = 0.1 ,lowerCamelCase__ : Optional[float] = 0.1 ,lowerCamelCase__ : Optional[int] = 512 ,lowerCamelCase__ : Optional[float] = 0.0_2 ,lowerCamelCase__ : Optional[bool] = True ,lowerCamelCase__ : Optional[bool] = True ,lowerCamelCase__ : Optional[int] = 0 ,lowerCamelCase__ : Optional[int] = 2 ,lowerCamelCase__ : Optional[int] = 32 ,lowerCamelCase__ : Optional[int] = 128 ,lowerCamelCase__ : Optional[bool] = False ,lowerCamelCase__ : Optional[float] = 0.0 ,lowerCamelCase__ : Optional[bool] = True ,lowerCamelCase__ : Optional[int] = 0 ,lowerCamelCase__ : Optional[int] = 1 ,lowerCamelCase__ : Optional[int] = 2 ,**lowerCamelCase__ : Union[str, Any] ,):
'''simple docstring'''
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : str = encoder_ffn_dim
_UpperCamelCase : List[Any] = num_encoder_layers
_UpperCamelCase : Tuple = num_encoder_attention_heads
_UpperCamelCase : Optional[int] = decoder_ffn_dim
_UpperCamelCase : List[Any] = num_decoder_layers
_UpperCamelCase : List[Any] = num_decoder_attention_heads
_UpperCamelCase : Optional[Any] = max_position_embeddings
_UpperCamelCase : str = init_std # Normal(0, this parameter)
_UpperCamelCase : List[str] = activation_function
# parameters for xlmprophetnet
_UpperCamelCase : Tuple = ngram
_UpperCamelCase : Optional[Any] = num_buckets
_UpperCamelCase : Tuple = relative_max_distance
_UpperCamelCase : str = disable_ngram_loss
_UpperCamelCase : str = eps
# 3 Types of Dropout
_UpperCamelCase : Union[str, Any] = attention_dropout
_UpperCamelCase : str = activation_dropout
_UpperCamelCase : List[str] = dropout
_UpperCamelCase : Tuple = use_cache
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,is_encoder_decoder=lowerCamelCase__ ,add_cross_attention=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
@property
def num_hidden_layers( self : Optional[int] ):
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def num_hidden_layers( self : str ,lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.' )
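# A minimal usage sketch (defaults assumed from the upstream XLM-ProphetNet
# config, where encoder and decoder each default to 12 layers):
# config = lowercase__()
# config.num_hidden_layers  # read-only sum: num_encoder_layers + num_decoder_layers = 24
# assigning to config.num_hidden_layers raises NotImplementedError, per the setter above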
| 83 | 0 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowercase__ :
def A_ ( self : str ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_001 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def A_ ( self : List[Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.414 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_001 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_001 , beta_end=0.02 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def A_ ( self : Any ):
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**UpperCAmelCase_ )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = inputs['prompt']
SCREAMING_SNAKE_CASE__ = inputs['generator']
SCREAMING_SNAKE_CASE__ = inputs['num_inference_steps']
SCREAMING_SNAKE_CASE__ = inputs['output_type']
if "image" in inputs:
SCREAMING_SNAKE_CASE__ = inputs['image']
else:
SCREAMING_SNAKE_CASE__ = None
if "mask_image" in inputs:
SCREAMING_SNAKE_CASE__ = inputs['mask_image']
else:
SCREAMING_SNAKE_CASE__ = None
if "original_image" in inputs:
SCREAMING_SNAKE_CASE__ = inputs['original_image']
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = pipe.encode_prompt(UpperCAmelCase_ )
# inputs with prompt converted to embeddings
SCREAMING_SNAKE_CASE__ = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
SCREAMING_SNAKE_CASE__ = image
if mask_image is not None:
SCREAMING_SNAKE_CASE__ = mask_image
if original_image is not None:
SCREAMING_SNAKE_CASE__ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = pipe(**UpperCAmelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.pipeline_class.from_pretrained(UpperCAmelCase_ )
pipe_loaded.to(UpperCAmelCase_ )
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase_ , UpperCAmelCase_ ) is None , F'`{optional_component}` did not stay set to None after loading.' , )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = inputs['generator']
SCREAMING_SNAKE_CASE__ = inputs['num_inference_steps']
SCREAMING_SNAKE_CASE__ = inputs['output_type']
# inputs with prompt converted to embeddings
SCREAMING_SNAKE_CASE__ = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
SCREAMING_SNAKE_CASE__ = image
if mask_image is not None:
SCREAMING_SNAKE_CASE__ = mask_image
if original_image is not None:
SCREAMING_SNAKE_CASE__ = original_image
SCREAMING_SNAKE_CASE__ = pipe_loaded(**UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE__ = np.abs(to_np(UpperCAmelCase_ ) - to_np(UpperCAmelCase_ ) ).max()
self.assertLess(UpperCAmelCase_ , 1e-4 )
def A_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**UpperCAmelCase_ )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = pipe(**UpperCAmelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.pipeline_class.from_pretrained(UpperCAmelCase_ )
pipe_loaded.to(UpperCAmelCase_ )
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = pipe_loaded(**UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE__ = np.abs(to_np(UpperCAmelCase_ ) - to_np(UpperCAmelCase_ ) ).max()
self.assertLess(UpperCAmelCase_ , 1e-4 )
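# Both round-trip tests above save the pipeline with save_pretrained, reload it
# with from_pretrained, and assert the regenerated output matches the original
# to within a 1e-4 max absolute difference.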
| 169 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__snake_case = models.Sequential()
# Step 1 - Convolution
# Here 64x64 is the height & width of the dataset images and 3 is the number of RGB channels
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="""relu""")
)
# Step 2 - Pooling
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.Conv2D(32, (3, 3), activation="""relu"""))
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_28, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__snake_case = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_55, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__snake_case = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_55)
__snake_case = train_datagen.flow_from_directory(
"""dataset/training_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
)
__snake_case = test_datagen.flow_from_directory(
"""dataset/test_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
)
# Model.fit accepts data generators directly in TF 2 (fit_generator is deprecated)
classifier.fit(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("""cnn.h5""")
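# The saved weights can later be restored without retraining via
# classifier = tf.keras.models.load_model("cnn.h5")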
# Part 3 - Making new predictions
__snake_case = tf.keras.preprocessing.image.load_img(
"""dataset/single_prediction/image.png""", target_size=(64, 64)
)
__snake_case = tf.keras.preprocessing.image.img_to_array(test_image)
__snake_case = np.expand_dims(test_image, axis=0)
__snake_case = classifier.predict(test_image)
# training_set.class_indices
# the sigmoid output is a probability in [0, 1], so threshold at 0.5
if result[0][0] <= 0.5:
__snake_case = """Normal"""
else:
__snake_case = """Abnormality detected"""
| 169 | 1 |
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
return f"""gaussian_noise_s={seed}_shape={"_".join([str(UpperCAmelCase__ ) for s in shape] )}.npy"""
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
def lowercase__ ( self , snake_case__=0 , snake_case__=(4, 4, 64, 64) , snake_case__=False ):
"""simple docstring"""
lowerCAmelCase : int = jnp.bfloataa if fpaa else jnp.floataa
lowerCAmelCase : Tuple = jnp.array(load_hf_numpy(self.get_file_format(UpperCAmelCase__ , UpperCAmelCase__ ) ) , dtype=UpperCAmelCase__ )
return image
def lowercase__ ( self , snake_case__=False , snake_case__="CompVis/stable-diffusion-v1-4" ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
lowerCAmelCase : Dict = """bf16""" if fpaa else None
lowerCAmelCase : List[str] = FlaxUNetaDConditionModel.from_pretrained(
UpperCAmelCase__ , subfolder="unet" , dtype=UpperCAmelCase__ , revision=UpperCAmelCase__ )
return model, params
def lowercase__ ( self , snake_case__=0 , snake_case__=(4, 77, 768) , snake_case__=False ):
"""simple docstring"""
lowerCAmelCase : str = jnp.bfloataa if fpaa else jnp.floataa
lowerCAmelCase : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(UpperCAmelCase__ , UpperCAmelCase__ ) ) , dtype=UpperCAmelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=UpperCAmelCase__ )
lowerCAmelCase : Optional[int] = self.get_latents(UpperCAmelCase__ , fpaa=UpperCAmelCase__ )
lowerCAmelCase : Dict = self.get_encoder_hidden_states(UpperCAmelCase__ , fpaa=UpperCAmelCase__ )
lowerCAmelCase : List[str] = model.apply(
{"params": params} , UpperCAmelCase__ , jnp.array(UpperCAmelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=UpperCAmelCase__ , ).sample
assert sample.shape == latents.shape
lowerCAmelCase : Union[str, Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowerCAmelCase : str = jnp.array(UpperCAmelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : str = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=UpperCAmelCase__ )
lowerCAmelCase : List[str] = self.get_latents(UpperCAmelCase__ , shape=(4, 4, 96, 96) , fpaa=UpperCAmelCase__ )
lowerCAmelCase : Optional[Any] = self.get_encoder_hidden_states(UpperCAmelCase__ , shape=(4, 77, 1_024) , fpaa=UpperCAmelCase__ )
lowerCAmelCase : str = model.apply(
{"params": params} , UpperCAmelCase__ , jnp.array(UpperCAmelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=UpperCAmelCase__ , ).sample
assert sample.shape == latents.shape
lowerCAmelCase : str = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowerCAmelCase : Dict = jnp.array(UpperCAmelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-2 )
| 108 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : int ="""openai/whisper-base"""
UpperCAmelCase__ : Dict =(
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
UpperCAmelCase__ : List[str] ="""transcriber"""
UpperCAmelCase__ : Union[str, Any] =WhisperProcessor
UpperCAmelCase__ : Union[str, Any] =WhisperForConditionalGeneration
UpperCAmelCase__ : Tuple =["""audio"""]
UpperCAmelCase__ : List[Any] =["""text"""]
def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[str] ) ->Union[str, Any]:
"""simple docstring"""
return self.pre_processor(UpperCAmelCase__ , return_tensors="""pt""" ).input_features
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[int] ) ->Dict:
"""simple docstring"""
return self.model.generate(inputs=UpperCAmelCase__ )
def _lowercase ( self : Dict , UpperCAmelCase__ : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
return self.pre_processor.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )[0]
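# A hypothetical usage sketch (variable names assumed, not from this file):
# PipelineTool instances are callable, chaining encode -> forward -> decode, so
# passing a 1-D waveform sampled at 16 kHz (Whisper's expected rate) returns text:
# transcriber = a__()
# text = transcriber(audio)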
| 245 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class A( _a , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MvpTokenizer
UpperCamelCase = MvpTokenizerFast
UpperCamelCase = True
UpperCamelCase = filter_roberta_detectors
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().setUp()
lowerCamelCase_ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowerCamelCase_ = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
lowerCamelCase_ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCamelCase_ = {"""unk_token""": """<unk>"""}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case_ ) )
def a__ ( self : Optional[Any] , **A_ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def a__ ( self : List[str] , **A_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def a__ ( self : List[Any] , A_ : List[str] ) -> Dict:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCamelCase_ = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase_ = tokenizer(snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , return_tensors='pt' )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
# Test that special tokens are reset
@require_torch
def a__ ( self : Dict ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase_ = tokenizer(snake_case_ , padding=snake_case_ , return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' , snake_case_ )
self.assertIn('attention_mask' , snake_case_ )
self.assertNotIn('labels' , snake_case_ )
self.assertNotIn('decoder_attention_mask' , snake_case_ )
@require_torch
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
lowerCamelCase_ = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase_ = tokenizer(text_target=snake_case_ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase_ = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=snake_case_ , truncation=snake_case_ , return_tensors='pt' )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def a__ ( self : str ) -> str:
"""simple docstring"""
lowerCamelCase_ = ["""A long paragraph for summarization."""]
lowerCamelCase_ = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase_ = tokenizer(snake_case_ , text_target=snake_case_ , return_tensors='pt' )
lowerCamelCase_ = inputs["""input_ids"""]
lowerCamelCase_ = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
pass
def a__ ( self : str ) -> Any:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
lowerCamelCase_ = """A, <mask> AllenNLP sentence."""
lowerCamelCase_ = tokenizer_r.encode_plus(snake_case_ , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ )
lowerCamelCase_ = tokenizer_p.encode_plus(snake_case_ , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
snake_case_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
snake_case_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
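# The parity check above encodes the same string with the Rust and Python
# tokenizers and asserts that input_ids, token_type_ids and attention_mask all
# agree, so any divergence in whitespace handling around <mask> surfaces here.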
| 371 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = "Hello world! cécé herlolip"
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : str , lowercase : bool ):
'''simple docstring'''
lowerCamelCase_ = FairseqRobertaModel.from_pretrained(lowercase )
roberta.eval() # disable dropout
lowerCamelCase_ = roberta.model.encoder.sentence_encoder
lowerCamelCase_ = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
lowerCamelCase_ = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our RoBERTa config:' , lowercase )
lowerCamelCase_ = XLMRobertaXLForSequenceClassification(lowercase ) if classification_head else XLMRobertaXLForMaskedLM(lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ = roberta_sent_encoder.embed_tokens.weight
lowerCamelCase_ = roberta_sent_encoder.embed_positions.weight
lowerCamelCase_ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
lowerCamelCase_ = roberta_sent_encoder.layer_norm.weight
lowerCamelCase_ = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ = model.roberta.encoder.layer[i]
lowerCamelCase_ = roberta_sent_encoder.layers[i]
lowerCamelCase_ = layer.attention
lowerCamelCase_ = roberta_layer.self_attn_layer_norm.weight
lowerCamelCase_ = roberta_layer.self_attn_layer_norm.bias
# self attention
lowerCamelCase_ = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
lowerCamelCase_ = roberta_layer.self_attn.q_proj.weight
lowerCamelCase_ = roberta_layer.self_attn.q_proj.bias
lowerCamelCase_ = roberta_layer.self_attn.k_proj.weight
lowerCamelCase_ = roberta_layer.self_attn.k_proj.bias
lowerCamelCase_ = roberta_layer.self_attn.v_proj.weight
lowerCamelCase_ = roberta_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
lowerCamelCase_ = roberta_layer.self_attn.out_proj.weight
lowerCamelCase_ = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
lowerCamelCase_ = roberta_layer.final_layer_norm.weight
lowerCamelCase_ = roberta_layer.final_layer_norm.bias
# intermediate (fairseq's fc1)
lowerCamelCase_ = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
lowerCamelCase_ = roberta_layer.fc1.weight
lowerCamelCase_ = roberta_layer.fc1.bias
# output (fairseq's fc2)
lowerCamelCase_ = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
lowerCamelCase_ = roberta_layer.fc2.weight
lowerCamelCase_ = roberta_layer.fc2.bias
# end of layer
if classification_head:
lowerCamelCase_ = roberta.model.classification_heads['mnli'].dense.weight
lowerCamelCase_ = roberta.model.classification_heads['mnli'].dense.bias
lowerCamelCase_ = roberta.model.classification_heads['mnli'].out_proj.weight
lowerCamelCase_ = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
lowerCamelCase_ = roberta.model.encoder.lm_head.dense.weight
lowerCamelCase_ = roberta.model.encoder.lm_head.dense.bias
lowerCamelCase_ = roberta.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ = roberta.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ = roberta.model.encoder.lm_head.weight
lowerCamelCase_ = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase_ = roberta.encode(lowercase ).unsqueeze(0 ) # batch of size 1
lowerCamelCase_ = model(lowercase )[0]
if classification_head:
lowerCamelCase_ = roberta.model.classification_heads['mnli'](roberta.extract_features(lowercase ) )
else:
lowerCamelCase_ = roberta.model(lowercase )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase_ = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCamelCase_ = torch.allclose(lowercase , lowercase , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
pathlib.Path(lowercase ).mkdir(parents=lowercase , exist_ok=lowercase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
lowerCamelCase : List[Any] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
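# Example invocation (script name and paths are placeholders):
# python convert_xlm_roberta_xl_checkpoint.py \
#     --roberta_checkpoint_path /path/to/fairseq_checkpoint \
#     --pytorch_dump_folder_path ./xlm-roberta-xl --classification_head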
| 208 | 0 |
"""simple docstring"""
from math import ceil
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ):
_UpperCAmelCase : Tuple = list(range(0 , UpperCamelCase__ ) )
_UpperCAmelCase : Any = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
_UpperCAmelCase : str = []
for i in device_map_blocks:
if device_map_blocks.count(UpperCamelCase__ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(UpperCamelCase__ )
# Missing blocks
_UpperCAmelCase : Union[str, Any] = [i for i in blocks if i not in device_map_blocks]
_UpperCAmelCase : Optional[Any] = [i for i in device_map_blocks if i not in blocks]
if len(UpperCamelCase__ ) != 0:
raise ValueError(
'''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
''' These attention blocks were specified more than once: ''' + str(UpperCamelCase__ ) )
if len(UpperCamelCase__ ) != 0:
raise ValueError(
'''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
'''blocks to a device on the device_map: ''' + str(UpperCamelCase__ ) )
if len(UpperCamelCase__ ) != 0:
raise ValueError(
'''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
+ str(UpperCamelCase__ ) )
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ):
_UpperCAmelCase : Tuple = list(range(UpperCamelCase__ ) )
_UpperCAmelCase : Optional[Any] = int(ceil(n_layers / len(UpperCamelCase__ ) ) )
_UpperCAmelCase : Optional[Any] = [layers[i : i + n_blocks] for i in range(0 , UpperCamelCase__ , UpperCamelCase__ )]
return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) )
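# A worked example of the splitting logic above: 12 layers over 4 devices gives
# ceil(12 / 4) = 3 consecutive layers per device, i.e.
# {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}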
| 263 |
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int ):
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_UpperCAmelCase : List[str] = str(bin(UpperCamelCase__ ) )[2:] # remove the leading "0b"
_UpperCAmelCase : str = str(bin(UpperCamelCase__ ) )[2:]
_UpperCAmelCase : List[str] = max(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(UpperCamelCase__ ) , b_binary.zfill(UpperCamelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
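# Worked example: for a = 25 (0b11001) and b = 32 (0b100000), both strings are
# zero-filled to 6 bits and OR-ed position by position, giving "0b111001" (= 57).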
| 263 | 1 |
from collections.abc import Sequence
def lowerCamelCase ( SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if nums is None or not nums:
raise ValueError('''Input sequence should not be empty''' )
__UpperCamelCase :Optional[int] = nums[0]
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
__UpperCamelCase :Any = nums[i]
__UpperCamelCase :Optional[Any] = max(SCREAMING_SNAKE_CASE , ans + num , SCREAMING_SNAKE_CASE )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__lowercase = int(input('''Enter number of elements : ''').strip())
__lowercase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
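# Worked example (Kadane's algorithm): for [-2, 1, -3, 4, -1, 2, 1, -5, 4] the
# best running sum is 4 + (-1) + 2 + 1 = 6, the maximum subarray sum.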
| 371 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :List[str] = torch.nn.Linear(2 , 4 )
__UpperCamelCase :Any = torch.optim.AdamW(model.parameters() , lr=1.0 )
__UpperCamelCase :List[Any] = torch.optim.lr_scheduler.OneCycleLR(SCREAMING_SNAKE_CASE , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
__UpperCamelCase :List[Any] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__UpperCamelCase :Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Union[str, Any] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(SCREAMING_SNAKE_CASE )
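# The two helpers above give a cheap scalar fingerprint of a model's weights
# (sum of absolute values of weight and bias) and a way to overwrite them with
# random values, so the tests below can assert that save_state / load_state
# really round-trips the parameters.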
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
@require_cuda
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Dict = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__lowercase):
__UpperCamelCase :Any = Accelerator(cpu=__lowercase)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :List[Any] = Accelerator()
__UpperCamelCase :List[Any] = GradientState()
assert state.num_steps == 1
__UpperCamelCase :Any = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__UpperCamelCase :int = False
assert state.sync_gradients is False
GradientState._reset_state()
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Tuple = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[Any] = create_components()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
self.assertTrue(prepared_model in accelerator._models)
self.assertTrue(prepared_optimizer in accelerator._optimizers)
self.assertTrue(prepared_scheduler in accelerator._schedulers)
self.assertTrue(prepared_train_dl in accelerator._dataloaders)
self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :str = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[Any] = create_components()
accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
accelerator.free_memory()
self.assertTrue(len(accelerator._models) == 0)
self.assertTrue(len(accelerator._optimizers) == 0)
self.assertTrue(len(accelerator._schedulers) == 0)
self.assertTrue(len(accelerator._dataloaders) == 0)
def UpperCamelCase__ ( self) -> Union[str, Any]:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__lowercase , **__lowercase):
pass
with patch('''torch.cuda.set_device''' , __lowercase), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64'''):
__UpperCamelCase :Optional[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device) , '''cuda:64''')
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :List[Any] = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[Any] = create_components()
accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
__UpperCamelCase :Tuple = get_signature(__lowercase)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowercase)
# make sure random weights don't match
load_random_weights(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) > 1E-3)
# make sure loaded weights match
accelerator.load_state(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) < 1E-3)
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :List[Any] = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = create_components()
accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
__UpperCamelCase :Any = get_signature(__lowercase)
# saving hook
def save_config(__lowercase , __lowercase , __lowercase):
__UpperCamelCase :Union[str, Any] = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(__lowercase , '''data.json''') , '''w''') as f:
json.dump(__lowercase , __lowercase)
# loading hook
def load_config(__lowercase , __lowercase):
with open(os.path.join(__lowercase , '''data.json''') , '''r''') as f:
__UpperCamelCase :Dict = json.load(__lowercase)
__UpperCamelCase :Dict = config['''class_name''']
__UpperCamelCase :Union[str, Any] = accelerator.register_save_state_pre_hook(__lowercase)
__UpperCamelCase :Any = accelerator.register_load_state_pre_hook(__lowercase)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowercase)
# make sure random weights don't match with hooks
load_random_weights(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) > 1E-3)
# random class name to verify correct one is loaded
__UpperCamelCase :int = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) < 1E-3)
# model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__)
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowercase)
# make sure random weights don't match with hooks removed
load_random_weights(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) > 1E-3)
# random class name to verify correct one is loaded
__UpperCamelCase :Dict = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) < 1E-3)
# model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__)
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Optional[Any] = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Union[str, Any] = create_components()
__UpperCamelCase :Optional[Any] = None
# This should work
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[Any] = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
self.assertTrue(dummy_obj is None)
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :List[str] = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = create_components()
__UpperCamelCase :Dict = [1, 2, 3]
# This should work
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Tuple = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def UpperCamelCase__ ( self) -> int:
from transformers import AutoModelForCausalLM
__UpperCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=__lowercase , device_map={'''''': 0} , )
__UpperCamelCase :Optional[Any] = Accelerator()
# This should work
__UpperCamelCase :int = accelerator.prepare(__lowercase)
@slow
@require_bnb
def UpperCamelCase__ ( self) -> List[str]:
from transformers import AutoModelForCausalLM
__UpperCamelCase :str = Accelerator()
with init_empty_weights():
__UpperCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__UpperCamelCase :List[str] = infer_auto_device_map(__lowercase)
__UpperCamelCase :str = '''cpu'''
__UpperCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=__lowercase , load_in_abit=__lowercase , llm_inta_enable_fpaa_cpu_offload=__lowercase)
# This should not work and get value error
with self.assertRaises(__lowercase):
__UpperCamelCase :Union[str, Any] = accelerator.prepare(__lowercase)
@slow
@require_bnb
@require_multi_gpu
def UpperCamelCase__ ( self) -> Union[str, Any]:
from transformers import AutoModelForCausalLM
__UpperCamelCase :int = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
__UpperCamelCase :Tuple = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__UpperCamelCase :int = infer_auto_device_map(__lowercase)
__UpperCamelCase :List[Any] = 1
__UpperCamelCase :int = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=__lowercase , device_map=__lowercase , )
__UpperCamelCase :Dict = Accelerator()
# This should not work and get value error
with self.assertRaises(__lowercase):
__UpperCamelCase :Any = accelerator.prepare(__lowercase)
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def UpperCamelCase__ ( self) -> Dict:
from transformers import AutoModelForCausalLM
with init_empty_weights():
__UpperCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
__UpperCamelCase :List[str] = infer_auto_device_map(__lowercase)
__UpperCamelCase :Optional[int] = 1
__UpperCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=__lowercase , device_map=__lowercase , )
__UpperCamelCase :int = Accelerator()
# This should work
__UpperCamelCase :int = accelerator.prepare(__lowercase)
@require_cuda
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :Tuple = torch.nn.Linear(10 , 10)
__UpperCamelCase :Optional[Any] = torch.optim.SGD(model.parameters() , lr=0.01)
__UpperCamelCase :Any = Accelerator(cpu=__lowercase)
__UpperCamelCase :Tuple = accelerator.prepare(__lowercase)
| 105 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : Tuple = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cumulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
_UpperCAmelCase : Optional[int] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
_UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor(
[8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above
_UpperCAmelCase : Optional[Any] = tf_top_k_top_p_filtering(A , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
_UpperCAmelCase : Tuple = output[output != -float('''inf''' )]
_UpperCAmelCase : Union[str, Any] = tf.cast(
tf.where(tf.not_equal(A , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(A , A , rtol=1E-12 )
tf.debugging.assert_equal(A , A )
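# In words: tf_top_k_top_p_filtering masks every logit outside the joint
# top-k / nucleus (top-p) candidate set to -inf; the assertions above check both
# which positions survive and that their values pass through unchanged.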
@require_tf
class _UpperCAmelCase ( unittest.TestCase ,a ):
'''simple docstring'''
if is_tf_available():
a__ ={
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
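
    # Illustrative follow-up (added; not part of the original test): the exported Keras
    # model bundles tokenization, generation, and detokenization, so it can be reloaded
    # and called on raw strings. Reloading a model with custom layers typically needs
    # `custom_objects`, so treat this as a sketch rather than guaranteed-working code:
    #
    #   reloaded = tf.keras.models.load_model(tmp_dir)   # hypothetical reload
    #   reloaded(tf.constant([["A sentence to complete"]]))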
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
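
    # Note (added for clarity; not in the original): `eos_token_id` accepts either a
    # single token id or a list of ids. Generation stops at the first end token
    # encountered, so under the same random seed both calls above are expected to
    # produce sequences of the same length.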
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # The fake model accepts an extra argument "foo"; its `call` drops it before
        # delegating, so generation with foo="bar" must match the regular output.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 263 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCAmelCase ( a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =IFImgaImgSuperResolutionPipeline
a__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
a__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
a__ =PipelineTesterMixin.required_optional_params - {'''latents'''}
def __lowerCAmelCase ( self ) -> List[str]:
return self._get_superresolution_dummy_components()
def __lowerCAmelCase ( self , A , A=0 ) -> Union[str, Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Any = torch.manual_seed(A )
else:
_UpperCAmelCase : int = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 263 | 1 |
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # "J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 367 |
from __future__ import annotations


def peak(lst: list[int]) -> int:
    """
    Return the peak value of an ascending-then-descending (unimodal) list,
    using a divide-and-conquer search in O(log n) time.
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
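
# Illustrative usage (added for clarity; not part of the original module). `peak`
# assumes the input first increases and then decreases:
#
#   peak([1, 2, 3, 4, 5, 4, 3, 2, 1])  # -> 5
#   peak([1, 10, 9, 8, 7, 6, 5, 4])    # -> 10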
| 10 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : str ,*A_ : Union[str, Any] ,**A_ : Optional[int] ) -> None:
warnings.warn(
'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DonutImageProcessor instead.' ,A_ ,)
super().__init__(*A_ ,**A_ ) | 74 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*A_ : List[str] ,**A_ : int ) -> None:
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' ,A_ ,)
super().__init__(*A_ ,**A_ ) | 74 | 1 |
import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
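

# For context (added; not part of the original tests): a minimal sketch of the
# decorator under test, assuming the semantics exercised above — call the wrapped
# function, halving `batch_size` on OOM-style errors until it succeeds or reaches
# zero. `find_executable_batch_size_sketch` is illustrative, not accelerate's
# actual implementation.
def find_executable_batch_size_sketch(function=None, starting_batch_size=128):
    def decorator(func):
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return func(batch_size, *args, **kwargs)
                except RuntimeError as e:
                    if "out of memory" in str(e).lower():
                        batch_size //= 2  # retry with a smaller batch
                    else:
                        raise
            raise RuntimeError("No executable batch size found, reached zero.")

        return wrapper

    # support both @decorator and @decorator(starting_batch_size=...) forms
    return decorator if function is None else decorator(function)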
| 332 |
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    # depth-first search over the state space tree: each level fixes one more
    # position, and `index_used` marks which elements are already placed
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
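
# Expected behaviour (added for clarity): each call prints one permutation per line —
# 24 lines for [3, 1, 2, 4] and 6 lines for ["A", "B", "C"], starting with the input
# order itself.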
| 332 | 1 |