| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 or 1) |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    """CamemBERT tokenizer, based on SentencePiece with fairseq-style special-token handling."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token: str = "<s>",
        eos_token: str = "</s>",
        sep_token: str = "</s>",
        cls_token: str = "<s>",
        unk_token: str = "<unk>",
        pad_token: str = "<pad>",
        mask_token: str = "<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by adding special tokens: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """CamemBERT, like RoBERTa, does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (int) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens (pieces) back into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
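A minimal usage sketch of the tokenizer above (illustrative only; it assumes the `camembert-base` checkpoint is reachable on the Hugging Face Hub):

from transformers import CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
encoding = tokenizer("J'aime le camembert !")
# SentencePiece ids are shifted by `fairseq_offset` (4 here), and the
# "<s>"/"</s>" framing comes from build_inputs_with_special_tokens above.
print(encoding["input_ids"])
print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))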
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    """
    Get the writer batch size that defines the maximum row group size in the parquet files.
    Multimedia and binary features get a smaller row group size so that row groups stay cheap to fetch.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the pyarrow table as Parquet to a binary file handle. The caller opens and closes the handle."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
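A rough round-trip sketch using the classes defined above (in user code the equivalent public entry points are `Dataset.to_parquet` and `Dataset.from_parquet`; the file name is a placeholder):

from datasets import Dataset, Split

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
# batch_size is left to get_writer_batch_size(), which only lowers it for
# image/audio/binary features; plain text keeps the library default.
ParquetDatasetWriter(ds, "data.parquet").write()
reloaded = ParquetDatasetReader("data.parquet", split=Split.TRAIN).read()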
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
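Driving the converter above from Python rather than the CLI amounts to the following (a sketch; all three paths are placeholders for real trax checkpoint artifacts):

convert_trax_checkpoint_to_pytorch(
    trax_model_pkl_path="./reformer_weights.pkl",  # pickled trax weights
    config_file="./reformer_config.json",          # ReformerConfig as JSON
    pytorch_dump_path="./pytorch_model.bin",       # output state dict
)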
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
_import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_owlvit"] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
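The `_LazyModule` indirection above keeps the package import cheap: the heavy torch/vision submodules are only materialized on first attribute access. A sketch of the observable effect (assumes transformers is installed with torch available):

import transformers

# No OWL-ViT modeling code has been imported yet at this point; touching the
# attribute triggers the lazy import declared in _import_structure above.
model_cls = transformers.models.owlvit.OwlViTForObjectDetection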
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" DistilBERT tokenizer (backed by the tokenizers library), based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if it disagrees with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding special tokens: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type ids: 0 for the first sequence, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
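A short usage sketch of the fast tokenizer above (assumes the `distilbert-base-uncased` checkpoint can be downloaded):

from transformers import DistilBertTokenizerFast

tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
batch = tokenizer(["Hello world!", "A second, longer sentence."], padding=True)
# The [CLS] ... [SEP] framing comes from build_inputs_with_special_tokens above,
# and lowercasing is applied because do_lower_case=True for this checkpoint.
print(batch["input_ids"])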
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`\'warn\'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"
    ):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average,
            sample_weight=sample_weight, zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
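A small usage sketch of the metric above (note that `datasets.load_metric` is deprecated in recent releases in favor of the `evaluate` library, which exposes the same compute signature):

import datasets

recall_metric = datasets.load_metric("recall")
results = recall_metric.compute(references=[0, 1, 1], predictions=[0, 0, 1])
print(results)  # {'recall': 0.5} -- one of the two positives was recovered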
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : int = int(number**0.5 )
return number == sq * sq
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
_a : int = x_den * y_den * z_den
_a : int = gcd(UpperCAmelCase_ , UpperCAmelCase_ )
top //= hcf
bottom //= hcf
return top, bottom
def lowerCAmelCase__ ( UpperCamelCase__ = 3_5 ):
'''simple docstring'''
_a : set = set()
_a : int
_a : Fraction = Fraction(0 )
_a : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
_a : Optional[Any] = x_num * y_den + x_den * y_num
_a : Union[str, Any] = x_den * y_den
_a : Union[str, Any] = gcd(UpperCAmelCase_ , UpperCAmelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_a : Union[str, Any] = add_three(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
unique_s.add(UpperCAmelCase_ )
# n=2
_a : Tuple = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
_a : Any = x_den * x_den * y_den * y_den
if is_sq(UpperCAmelCase_ ) and is_sq(UpperCAmelCase_ ):
_a : List[str] = int(sqrt(UpperCAmelCase_ ) )
_a : Optional[Any] = int(sqrt(UpperCAmelCase_ ) )
_a : Tuple = gcd(UpperCAmelCase_ , UpperCAmelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_a : int = add_three(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
unique_s.add(UpperCAmelCase_ )
# n=-1
_a : int = x_num * y_num
_a : Any = x_den * y_num + x_num * y_den
_a : Tuple = gcd(UpperCAmelCase_ , UpperCAmelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_a : Tuple = add_three(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
unique_s.add(UpperCAmelCase_ )
# n=2
_a : Dict = x_num * x_num * y_num * y_num
_a : List[str] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(UpperCAmelCase_ ) and is_sq(UpperCAmelCase_ ):
_a : Tuple = int(sqrt(UpperCAmelCase_ ) )
_a : Dict = int(sqrt(UpperCAmelCase_ ) )
_a : Union[str, Any] = gcd(UpperCAmelCase_ , UpperCAmelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_a : Union[str, Any] = add_three(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
unique_s.add(UpperCAmelCase_ )
for num, den in unique_s:
total += Fraction(UpperCAmelCase_ , UpperCAmelCase_ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F'''{solution() = }''')
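A quick sanity check of the helpers above (the identity 1/2 + 1/3 + 1/6 = 1 exercises add_three's reduction step):

assert is_sq(36) and not is_sq(35)
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)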
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir('fixtures/test_sentencepiece.model')
_snake_case = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
_snake_case = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( snake_case_ , unittest.TestCase ):
UpperCamelCase : str = CamembertTokenizer
UpperCamelCase : List[Any] = CamembertTokenizerFast
UpperCamelCase : Optional[int] = True
UpperCamelCase : Union[str, Any] = True
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_a : List[Any] = CamembertTokenizer(UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self : List[str] ) -> Tuple:
_a : Optional[Any] = """<pad>"""
_a : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ) -> str:
_a : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(UpperCAmelCase__ ) , 1004 )
def _lowercase ( self : List[str] ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def _lowercase ( self : Union[str, Any] ) -> str:
_a : Tuple = CamembertTokenizer(UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
_a : List[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
_a : Any = """I was born in 92000, and this is falsé."""
_a : Union[str, Any] = tokenizer.encode(UpperCAmelCase__ )
_a : Dict = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
_a : List[Any] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
_a : List[str] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
_a : int = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : Dict ) -> List[str]:
if not self.test_rust_tokenizer:
return
_a : Optional[int] = self.get_tokenizer()
_a : Tuple = self.get_rust_tokenizer()
_a : List[Any] = """I was born in 92000, and this is falsé."""
_a : List[str] = tokenizer.tokenize(UpperCAmelCase__ )
_a : Union[str, Any] = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
_a : int = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
_a : Optional[int] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
_a : int = self.get_rust_tokenizer()
_a : Optional[Any] = tokenizer.encode(UpperCAmelCase__ )
_a : Dict = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def _lowercase ( self : Tuple ) -> List[Any]:
# fmt: off
_a : Dict = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
_a : Union[str, Any] = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=UpperCAmelCase__ , )
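Distilled from the parity tests above, the slow/fast agreement being asserted is essentially the following (a sketch; assumes both classes can load the public checkpoint):

from transformers import CamembertTokenizer, CamembertTokenizerFast

slow = CamembertTokenizer.from_pretrained("camembert-base")
fast = CamembertTokenizerFast.from_pretrained("camembert-base")
text = "I was born in 92000, and this is falsé."
assert slow.encode(text) == fast.encode(text)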
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
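Distilled from the tests above, everyday pipeline usage looks like this (a sketch; the default sentiment checkpoint download must succeed):

from transformers import pipeline

classifier = pipeline("text-classification")
print(classifier("This is great !"))              # [{'label': 'POSITIVE', ...}]
print(classifier("This is great !", top_k=None))  # scores for every label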
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    """SentencePiece tokenizer for BertGeneration (bert_for_seq_generation)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
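A brief usage sketch of the tokenizer above (assumes the public `google/bert_for_seq_generation_L-24_bbc_encoder` checkpoint is available):

from transformers import BertGenerationTokenizer

tokenizer = BertGenerationTokenizer.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder"
)
# Plain sentencepiece pieces; no [CLS]/[SEP]-style framing is added.
print(tokenizer.tokenize("Hello world"))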
def ugly_numbers(n: int) -> int:
    """Return the nth ugly number (a number whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
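The three-pointer merge above emits the sequence in increasing order; a quick check of the first terms:

# Expected: [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]
print([ugly_numbers(n) for n in range(1, 11)])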
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : List[str] , _A : List[Any] , _A : Union[str, Any]=7 , _A : List[str]=3 , _A : str=30 , _A : Tuple=400 , _A : Optional[int]=True , _A : List[str]=None , _A : int=True , _A : int=[0.5, 0.5, 0.5] , _A : Optional[int]=[0.5, 0.5, 0.5] , _A : List[Any]=True , _A : str=1 / 255 , _A : Tuple=True , ):
'''simple docstring'''
UpperCAmelCase__ : str = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : List[str] = num_channels
UpperCAmelCase__ : List[Any] = min_resolution
UpperCAmelCase__ : List[str] = max_resolution
UpperCAmelCase__ : Tuple = do_resize
UpperCAmelCase__ : Union[str, Any] = size
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : Union[str, Any] = image_mean
UpperCAmelCase__ : Optional[int] = image_std
UpperCAmelCase__ : Dict = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : int = do_pad
def lowercase_ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase_ ( self : Any , _A : Union[str, Any] , _A : Union[str, Any]=False ):
'''simple docstring'''
if not batched:
UpperCAmelCase__ : Optional[int] = image_inputs[0]
if isinstance(_A , Image.Image ):
UpperCAmelCase__ , UpperCAmelCase__ : str = image.size
else:
UpperCAmelCase__ , UpperCAmelCase__ : int = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase__ : Optional[Any] = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase__ : List[Any] = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase__ : int = self.size['''shortest_edge''']
UpperCAmelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase__ : List[str] = self.size['''shortest_edge''']
UpperCAmelCase__ : Dict = self.size['''shortest_edge''']
else:
UpperCAmelCase__ : int = []
for image in image_inputs:
UpperCAmelCase__ , UpperCAmelCase__ : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[0] )[0]
UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = DetaImageProcessor if is_vision_available() else None
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = DetaImageProcessingTester(self )
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''do_rescale''' ) )
self.assertTrue(hasattr(_A , '''do_pad''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} )
self.assertEqual(image_processor.do_pad , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processor_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase__ : Union[str, Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 299 | 1 |
"""simple docstring"""
from math import sqrt
def lowercase_ ( _snake_case ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(sqrt(_snake_case ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase_ ( _snake_case = 10_001 ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
SCREAMING_SNAKE_CASE__ : int = 1
while count != nth and number < 3:
number += 1
if is_prime(_snake_case ):
count += 1
while count != nth:
number += 2
if is_prime(_snake_case ):
count += 1
return number
if __name__ == "__main__":
print(f"""{solution() = }""")
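    # Minimal sanity checks (an illustrative addition, not part of the original
    # solution): 91 == 7 * 13 is composite, and the sixth prime is 13.
    assert is_prime(97) and not is_prime(91)
    assert solution(6) == 13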
| 25 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
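
if __name__ == "__main__" and is_tf_available():
    # Illustrative sketch, not part of the original test file: the attention
    # mask built by `prepare_mbart_inputs_dict` is 1 for real tokens and 0 for
    # padding (pad_token_id is 1 in the tester above).
    demo_ids = tf.constant([[5, 7, 1, 1]])
    demo_mask = tf.cast(tf.math.not_equal(demo_ids, 1), tf.int8)
    print(demo_mask.numpy())  # [[1 1 0 0]]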
| 273 | 0 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 284 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
    import PIL

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_rescale=True, rescale_factor=1 / 255, do_center_crop=True, crop_size=None, do_flip_channel_order=True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(self, image, size, resample=PIL.Image.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image, data_format=None):
        return flip_channel_order(image, data_format=data_format)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_rescale=None, rescale_factor=None, do_center_crop=None, crop_size=None, do_flip_channel_order=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 284 | 1 |
"""simple docstring"""
from math import isclose, sqrt
def snake_case__ ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float ):
"""simple docstring"""
lowerCamelCase__ : str =point_y / 4 / point_x
lowerCamelCase__ : Any =2 * normal_gradient / (1 + normal_gradient * normal_gradient)
lowerCamelCase__ : Optional[Any] =(1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
lowerCamelCase__ : Optional[int] =(sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
lowerCamelCase__ : List[str] =outgoing_gradient**2 + 4
lowerCamelCase__ : Optional[Any] =2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
lowerCamelCase__ : Union[str, Any] =(point_y - outgoing_gradient * point_x) ** 2 - 100
lowerCamelCase__ : Optional[int] =(
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
lowerCamelCase__ : Tuple =(
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
lowerCamelCase__ : Union[str, Any] =x_minus if isclose(__lowerCamelCase , __lowerCamelCase ) else x_plus
lowerCamelCase__ : Optional[Any] =point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def snake_case__ ( __lowerCamelCase : float = 1.4 , __lowerCamelCase : float = -9.6 ):
"""simple docstring"""
lowerCamelCase__ : int =0
lowerCamelCase__ : float =first_x_coord
lowerCamelCase__ : float =first_y_coord
lowerCamelCase__ : float =(10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =next_point(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'{solution() = }')
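    # Quick consistency check (an illustrative addition): the default starting
    # point (1.4, -9.6) lies on the ellipse, since
    # 4 * 1.4**2 + (-9.6)**2 == 7.84 + 92.16 == 100.
    assert isclose(4 * 1.4**2 + (-9.6) ** 2, 100.0)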
| 238 |
"""simple docstring"""
from __future__ import annotations
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
if b == 0:
return (1, 0)
((lowerCamelCase__) , (lowerCamelCase__)) : Any =extended_euclid(__lowerCamelCase , a % b )
lowerCamelCase__ : Optional[Any] =a // b
return (y, x - k * y)
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
((lowerCamelCase__) , (lowerCamelCase__)) : Any =extended_euclid(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : List[Any] =na * na
lowerCamelCase__ : Union[str, Any] =ra * x * na + ra * y * na
return (n % m + m) % m
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
((lowerCamelCase__) , (lowerCamelCase__)) : int =extended_euclid(__lowerCamelCase , __lowerCamelCase )
if b < 0:
lowerCamelCase__ : Any =(b % n + n) % n
return b
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__ : Any =invert_modulo(__lowerCamelCase , __lowerCamelCase ), invert_modulo(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Tuple =na * na
lowerCamelCase__ : Optional[Any] =ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
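    # Worked example (an illustrative addition): 31 is the unique value in
    # [0, 35) with 31 % 5 == 1 and 31 % 7 == 3.
    assert chinese_remainder_theorem(5, 1, 7, 3) == 31
    assert chinese_remainder_theorem2(5, 1, 7, 3) == 31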
| 238 | 1 |
"""GPTSAN-japanese model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192, d_ext=4096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-5, router_bias=False, router_jitter_noise=0.0, router_dtype="float32", router_ignore_padding_tokens=False, output_hidden_states=False, output_attentions=False, initializer_factor=0.002, output_router_logits=False, use_cache=True, separator_token_id=35998, pad_token_id=35995, eos_token_id=35999, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
| 356 |
"""
Project Euler problem 13: https://projecteuler.net/problem=13
Work out the first ten digits of the sum of the numbers listed in num.txt.
"""
import os


def solution() -> str:
    """Returns the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
| 337 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
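
if __name__ == "__main__":
    # Minimal usage sketch (an illustrative addition, not part of the original
    # module): the preprocessing order above is convert_rgb -> resize ->
    # center_crop -> rescale -> normalize, so a 640x480 PIL image comes out as
    # a (1, 3, 224, 224) array with the default 224x224 crop.
    image_processor = CLIPImageProcessor()
    dummy = PIL.Image.new("RGB", (640, 480))
    print(image_processor(images=dummy, return_tensors="np").pixel_values.shape)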
| 113 |
"""simple docstring"""
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> str:
SCREAMING_SNAKE_CASE = int(SCREAMING_SNAKE_CASE_ )
if decimal in (0, 1): # Exit cases for the recursion
return str(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = divmod(SCREAMING_SNAKE_CASE_ , 2 )
return binary_recursive(SCREAMING_SNAKE_CASE_ ) + str(SCREAMING_SNAKE_CASE_ )
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> str:
SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ).strip()
if not number:
raise ValueError('No input value was provided' )
SCREAMING_SNAKE_CASE = '-' if number.startswith('-' ) else ''
SCREAMING_SNAKE_CASE = number.lstrip('-' )
if not number.isnumeric():
raise ValueError('Input value is not an integer' )
return F'{negative}0b{binary_recursive(int(SCREAMING_SNAKE_CASE_ ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
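    # A couple of concrete checks (an illustrative addition alongside the
    # doctests): 8 -> "1000" and -15 -> "-0b1111".
    assert binary_recursive(8) == "1000"
    assert main("-15") == "-0b1111"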
| 113 | 1 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    """
    Simplest and fastest version of image resizing: each destination pixel
    copies the nearest source pixel.
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
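    # Worked example of the index mapping (an illustrative addition): for a
    # 660-column source resized to 800 columns, ratio_x = 660 / 800 = 0.825,
    # so the last output column 799 samples source column
    # int(0.825 * 799) == 659 -- the last source column, never out of range.
    assert int((660 / 800) * 799) == 659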
| 353 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(self, processed_features, padding=True, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, truncation=truncation
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(self, processed_features, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(self, processed_features, max_length=None, pad_to_multiple_of=None, truncation=None):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
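
if __name__ == "__main__":
    # Illustrative sketch of what `_pad` does for a right-padded 1-D input
    # (an addition, not part of the original module): extend the values with
    # the padding value and the attention mask with zeros.
    demo_input = np.array([0.1, 0.2, 0.3])
    difference = 5 - len(demo_input)
    print(np.pad(demo_input, (0, difference), "constant", constant_values=0.0))  # [0.1 0.2 0.3 0.  0. ]
    print(np.pad(np.ones(len(demo_input), dtype=np.int32), (0, difference)))  # [1 1 1 0 0]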
| 293 | 0 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
    import timm

if is_torch_available():
    from torch import Tensor


class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Empty init weights function to ensure compatibility of the class in the library.
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
| 48 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """ctrl"""
lowerCamelCase__ = ["""past_key_values"""]
lowerCamelCase__ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase=246534 , lowercase=256 , lowercase=1280 , lowercase=8192 , lowercase=48 , lowercase=16 , lowercase=0.1 , lowercase=0.1 , lowercase=1E-6 , lowercase=0.02 , lowercase=True , **lowercase , ):
_lowerCamelCase : Any = vocab_size
_lowerCamelCase : Dict = n_positions
_lowerCamelCase : Optional[int] = n_embd
_lowerCamelCase : str = n_layer
_lowerCamelCase : Union[str, Any] = n_head
_lowerCamelCase : Any = dff
_lowerCamelCase : int = resid_pdrop
_lowerCamelCase : Dict = embd_pdrop
_lowerCamelCase : Union[str, Any] = layer_norm_epsilon
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : str = use_cache
super().__init__(**lowercase ) | 96 | 0 |
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
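
if __name__ == "__main__":
    # Minimal usage sketch (an illustrative addition, not part of the original
    # module): build a 1 kHz low-pass biquad for 48 kHz audio and push one
    # sample through it. `IIRFilter.process` is assumed to take a single float
    # sample, as in the sibling `audio_filters.iir_filter` module.
    lowpass = make_lowpass(1000, 48000)
    print(lowpass.process(1.0))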
| 278 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__A = logging.get_logger(__name__)
__A = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__A = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
__A = {'''allegro/herbert-base-cased''': 5_14}
__A = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
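# A minimal usage sketch (fetches the allegro/herbert-base-cased checkpoint listed above):
#
#     tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#     print(tokenizer("Witaj świecie")["input_ids"])  # ids wrapped with <s> ... </s>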
| 278 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` through the HF module tree and copy `value` into the named parameter."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    """Return True if `name` matches one of the (possibly wildcarded) ignore keys."""
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
            prefix, suffix = key.split(".*.")
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    """Map every weight of the original EnCodec checkpoint onto the matching HF module."""
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):  # the original `or` test was always truthy
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
if "weight_g" in name:
UpperCamelCase = 'weight_g'
elif "weight_v" in name:
UpperCamelCase = 'weight_v'
elif "weight_ih_l0" in name:
UpperCamelCase = 'weight_ih_l0'
elif "weight_hh_l0" in name:
UpperCamelCase = 'weight_hh_l0'
elif "bias_ih_l0" in name:
UpperCamelCase = 'bias_ih_l0'
elif "bias_hh_l0" in name:
UpperCamelCase = 'bias_hh_l0'
elif "weight_ih_l1" in name:
UpperCamelCase = 'weight_ih_l1'
elif "weight_hh_l1" in name:
UpperCamelCase = 'weight_hh_l1'
elif "bias_ih_l1" in name:
UpperCamelCase = 'bias_ih_l1'
elif "bias_hh_l1" in name:
UpperCamelCase = 'bias_hh_l1'
elif "bias" in name:
UpperCamelCase = 'bias'
elif "weight" in name:
UpperCamelCase = 'weight'
elif "running_mean" in name:
UpperCamelCase = 'running_mean'
elif "running_var" in name:
UpperCamelCase = 'running_var'
elif "num_batches_tracked" in name:
UpperCamelCase = 'num_batches_tracked'
else:
UpperCamelCase = None
set_recursively(A__ , A__ , A__ , A__ , A__ )
continue
if not is_used:
            unused_weights.append(name)
logger.warning(F"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original EnCodec checkpoint into the HF `EncodecModel` format."""
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 28 | """simple docstring"""
from collections import Counter
from timeit import timeit
def _lowerCamelCase( a = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def _lowerCamelCase( a = "" ):
if len(a ) == 0:
return True
__a = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__a = {}
for character in lower_case_input_str:
__a = character_freq_dict.get(a , 0 ) + 1
__a = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _lowerCamelCase( a = "" ):
print("\nFor string = " , a , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
SCREAMING_SNAKE_CASE__:Dict = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
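# Quick hand-verifiable checks: "Momo" has counts {m: 2, o: 2} -> True,
# while "abc" has three odd counts -> False.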
| 261 | 0 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
"""simple docstring"""
SCREAMING_SNAKE_CASE : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
SCREAMING_SNAKE_CASE : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
SCREAMING_SNAKE_CASE : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
SCREAMING_SNAKE_CASE : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
SCREAMING_SNAKE_CASE : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
SCREAMING_SNAKE_CASE : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
SCREAMING_SNAKE_CASE : Optional[str] = None
# Templates used to generate this protein (prediction-only)
SCREAMING_SNAKE_CASE : Optional[Sequence[str]] = None
# Chain corresponding to each parent
SCREAMING_SNAKE_CASE : Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    """Parse a ProteinNet-format string into a `Protein` (backbone atoms only)."""
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=atom_positions, atom_mask=atom_mask, aatype=aatype, residue_index=np.arange(len(aatype)), b_factors=None, )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    """Build the REMARK/PARENT header lines for one chain of a PDB file."""
    pdb_headers: List[str] = []
    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")
    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents, parents_chain_index) if i == chain_id]
    if parents is None or len(parents) == 0:
        parents = ["N/A"]
    pdb_headers.append(f"PARENT {' '.join(parents)}")
    return pdb_headers
def _a ( _lowercase : Protein , _lowercase : str ):
'''simple docstring'''
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : Optional[Any] = pdb_str.split('''\n''' )
__UpperCAmelCase : Tuple = prot.remark
if remark is not None:
out_pdb_lines.append(F'REMARK {remark}' )
__UpperCAmelCase : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
__UpperCAmelCase : List[Any] = []
if prot.parents_chain_index is not None:
__UpperCAmelCase : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(_lowercase ) , [] )
parent_dict[str(_lowercase )].append(_lowercase )
__UpperCAmelCase : List[str] = max([int(_lowercase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
__UpperCAmelCase : Tuple = parent_dict.get(str(_lowercase ) , ['''N/A'''] )
parents_per_chain.append(_lowercase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
__UpperCAmelCase : str = [['''N/A''']]
def make_parent_line(_lowercase : Sequence[str] ) -> str:
return F'PARENT {" ".join(_lowercase )}'
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
__UpperCAmelCase : int = 0
for i, l in enumerate(_lowercase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(_lowercase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(_lowercase ):
__UpperCAmelCase : List[str] = parents_per_chain[chain_counter]
else:
__UpperCAmelCase : Dict = ['''N/A''']
out_pdb_lines.append(make_parent_line(_lowercase ) )
return "\n".join(_lowercase )
def _a ( _lowercase : Protein ):
'''simple docstring'''
__UpperCAmelCase : Dict = residue_constants.restypes + ['''X''']
def res_atoa(_lowercase : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' )
__UpperCAmelCase : Any = residue_constants.atom_types
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : int = prot.atom_mask
__UpperCAmelCase : Union[str, Any] = prot.aatype
__UpperCAmelCase : Optional[int] = prot.atom_positions
__UpperCAmelCase : List[str] = prot.residue_index.astype(np.intaa )
__UpperCAmelCase : Any = prot.b_factors
__UpperCAmelCase : Dict = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('''Invalid aatypes.''' )
__UpperCAmelCase : List[Any] = get_pdb_headers(_lowercase )
if len(_lowercase ) > 0:
pdb_lines.extend(_lowercase )
__UpperCAmelCase : List[str] = aatype.shape[0]
__UpperCAmelCase : int = 1
__UpperCAmelCase : Any = 0
__UpperCAmelCase : Union[str, Any] = string.ascii_uppercase
__UpperCAmelCase : str = None
# Add all atom sites.
for i in range(_lowercase ):
__UpperCAmelCase : List[Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_lowercase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
__UpperCAmelCase : Union[str, Any] = '''ATOM'''
__UpperCAmelCase : Dict = atom_name if len(_lowercase ) == 4 else F' {atom_name}'
__UpperCAmelCase : Any = ''''''
__UpperCAmelCase : Any = ''''''
__UpperCAmelCase : int = 1.00
__UpperCAmelCase : List[str] = atom_name[0] # Protein supports only C, N, O, S, this works.
__UpperCAmelCase : Tuple = ''''''
__UpperCAmelCase : Any = '''A'''
if chain_index is not None:
__UpperCAmelCase : Optional[Any] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
__UpperCAmelCase : Dict = (
F'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
F'{res_name_a:>3} {chain_tag:>1}'
F'{residue_index[i]:>4}{insertion_code:>1} '
F'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
F'{occupancy:>6.2f}{b_factor:>6.2f} '
F'{element:>2}{charge:>2}'
)
pdb_lines.append(_lowercase )
atom_index += 1
__UpperCAmelCase : Any = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : Dict = chain_index[i + 1]
if should_terminate:
# Close the chain.
__UpperCAmelCase : List[Any] = '''TER'''
__UpperCAmelCase : Optional[Any] = (
F'{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'
)
pdb_lines.append(_lowercase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_lowercase , _lowercase ) )
pdb_lines.append('''END''' )
pdb_lines.append('''''' )
return "\n".join(_lowercase )
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask from the residue types alone."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(features: FeatureDict, result: ModelOutput, b_factors: Optional[np.ndarray] = None, chain_index: Optional[np.ndarray] = None, remark: Optional[str] = None, parents: Optional[Sequence[str]] = None, parents_chain_index: Optional[Sequence[int]] = None) -> Protein:
    """Assembles a `Protein` object from a model prediction."""
    return Protein(
        aatype=features["aatype"], atom_positions=result["final_atom_positions"], atom_mask=result["final_atom_mask"], residue_index=features["residue_index"] + 1, b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]), chain_index=chain_index, remark=remark, parents=parents, parents_chain_index=parents_chain_index, )
| 240 |
'''simple docstring'''
def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorisation: the product of (multiplicity + 1)."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangular number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
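# Worked example: 28 = 2^2 * 7, so count_divisors(28) == (2 + 1) * (1 + 1) == 6,
# matching its divisors 1, 2, 4, 7, 14, 28.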
if __name__ == "__main__":
print(solution()) | 240 | 1 |
'''simple docstring'''
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
_a = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_a = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global]." )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index)}"
# Self-Attention
_a = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
_a = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
_a = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
_a = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
_a = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
_a = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
_a = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
_a = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
_a = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
_a = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
_a = flax_model.params["encoder"]["block"][str(lowercase )]["layer"]
_a = tax_attention_key
_a = tax_attention_out
_a = tax_attention_query
_a = tax_attention_value
_a = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a = tax_global_layer_norm
if split_mlp_wi:
_a = tax_mlp_wi_a
_a = tax_mlp_wi_a
else:
_a = tax_mlp_wi
_a = tax_mlp_wo
_a = tax_mlp_layer_norm
_a = flax_model_encoder_layer_block
# Only for layer 0:
_a = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
_a = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
_a = tax_encoder_global_rel_embedding
# Assigning
_a = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
_a = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index)}"
# Self-Attention
_a = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
_a = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
_a = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
_a = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
_a = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
_a = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
_a = tax_enc_dec_attention_module["key"]["kernel"]
_a = tax_enc_dec_attention_module["out"]["kernel"]
_a = tax_enc_dec_attention_module["query"]["kernel"]
_a = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
_a = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
_a = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
_a = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
_a = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
_a = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
_a = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
_a = flax_model.params["decoder"]["block"][str(lowercase )]["layer"]
_a = tax_attention_key
_a = tax_attention_out
_a = tax_attention_query
_a = tax_attention_value
_a = tax_pre_attention_layer_norm
_a = tax_enc_dec_attention_key
_a = tax_enc_dec_attention_out
_a = tax_enc_dec_attention_query
_a = tax_enc_dec_attention_value
_a = tax_cross_layer_norm
if split_mlp_wi:
_a = tax_mlp_wi_a
_a = tax_mlp_wi_a
else:
_a = tax_mlp_wi
_a = tax_mlp_wo
_a = txa_mlp_layer_norm
_a = flax_model_decoder_layer_block
# Decoder Normalization
_a = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
_a = txa_decoder_norm
# Only for layer 0:
_a = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
_a = tax_decoder_rel_embedding
# Token Embeddings
_a = tax_model["target"]["token_embedder"]["embedding"]
_a = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_a = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(lowercase )
print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
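    # Example invocation (paths are placeholders):
    #   python convert_t5x_checkpoint_to_flax.py --t5x_checkpoint_path /path/to/t5x_checkpoint \
    #       --config_name google/long-t5-local-base --flax_dump_folder_path ./flax_dump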
| 63 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices = vertices
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Grow a minimum spanning tree outward from the smallest vertex."""
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data = f.read().strip().split('\n')
    adjaceny_matrix = [line.split(',') for line in data]
    for edgea in range(1, len(adjaceny_matrix)):
        for edgeb in range(edgea):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjaceny_matrix[edgea][edgeb])
    graph = Graph(set(range(len(adjaceny_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
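# Toy check: in the triangle {(0, 1): 1, (1, 2): 2, (0, 2): 3}, prims_algorithm
# keeps edges (0, 1) and (1, 2), so the saving over the full graph is 6 - 3 = 3.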
if __name__ == "__main__":
print(F'''{solution() = }''')
| 222 | 0 |
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
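    # With arr = [10, 20, 30, 40, 50] and r = 3 this prints C(5, 3) = 10 rows,
    # beginning: 10 20 30, 10 20 40, 10 20 50, 10 30 40, ...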
# This code is contributed by Ambuj sahu | 325 |
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 325 | 1 |
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-indexed position of the highest set bit (0 for 0)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
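# e.g. get_highest_set_bit_position(1) == 1, get_highest_set_bit_position(8) == 4
# (0b1000), and get_highest_set_bit_position(0) == 0.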
if __name__ == "__main__":
import doctest
doctest.testmod()
| 158 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 158 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc):
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
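# sherman_morrison applies the Sherman-Morrison identity, with `self` playing
# the role of A^(-1):
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)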
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 24 |
"""simple docstring"""
def twos_complement(number: int) -> str:
    """Return the two's complement bit string of a non-positive integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod() | 233 |
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
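# e.g. twin_prime(3) == 5 (3 and 5 are both prime), while twin_prime(4) == -1.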
if __name__ == "__main__":
import doctest
doctest.testmod() | 233 | 1 |
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 2_7,
'up': 6_5 + ARROW_KEY_FLAG,
'down': 6_6 + ARROW_KEY_FLAG,
'right': 6_7 + ARROW_KEY_FLAG,
'left': 6_8 + ARROW_KEY_FLAG,
'mod_int': 9_1,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 5_0,
'delete': 5_1,
'pg_up': 5_3,
'pg_down': 5_4,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 363 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length", )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 222 | 0 |
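# A minimal usage sketch for the TextClassificationTool above
# (downloads the facebook/bart-large-mnli checkpoint):
#
#     classifier = TextClassificationTool()
#     classifier("This new API is a joy to use!", labels=["positive", "negative"])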
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(
            generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
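# The tests above hinge on re-seeding torch's RNG before each call, so the dict-style and
# tuple-style pipeline outputs see identical noise and can be compared elementwise. The
# pattern in isolation (`pipe` stands for any diffusers pipeline instance):
def check_dict_and_tuple_outputs_match(pipe):
    generator = torch.manual_seed(0)
    image = pipe(generator=generator, num_inference_steps=20, output_type="numpy").images
    generator = torch.manual_seed(0)  # identical seed -> identical latents
    image_tuple = pipe(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]
    assert np.abs(image - image_tuple).max() < 1e-2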
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
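# Usage sketch for the converter above; the file names are placeholders. Halving a saved
# state dict to fp16 roughly halves its size on disk, and torch.load works as usual after:
#
#     python fp16_convert.py pytorch_model.bin --save_path pytorch_model_fp16.bin
#
# or, from Python: convert("pytorch_model.bin", save_path="pytorch_model_fp16.bin")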
'''simple docstring'''
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
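# A quick check of the recurrence above, assuming the Project Euler 117-style setup it
# encodes (a row of grey unit squares that may be covered by tiles of length 2, 3 or 4,
# mixing allowed). The leftmost-tile decomposition gives, for lengths 0..5:
#
#     ways_number = 1, 1, 2, 4, 8, 15   # so solution(5) == 15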
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01) -> None:
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
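# A quick numerical check of the evaluation above, with no plotting required: for the
# symmetric quadratic BezierCurve([(0.0, 0.0), (1.0, 2.0), (2.0, 0.0)]),
# bezier_curve_function(0.5) is 0.25*P0 + 0.5*P1 + 0.25*P2 = (1.0, 1.0), and t = 0
# returns the first control point, since the basis collapses to (1, 0, 0) there.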
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : int , lowercase_ : List[Any] = 16 , lowercase_ : Optional[Any] = 88 , lowercase_ : str = None , lowercase_ : Optional[int] = 1 , lowercase_ : List[Any] = 0.0 , lowercase_ : Any = 32 , lowercase_ : List[Any] = None , lowercase_ : Optional[Any] = False , lowercase_ : Union[str, Any] = None , lowercase_ : List[str] = None , lowercase_ : Optional[int] = "geglu" , lowercase_ : Union[str, Any] = None , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : List[str] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=_A , attention_head_dim=_A , in_channels=_A , num_layers=_A , dropout=_A , norm_num_groups=_A , cross_attention_dim=_A , attention_bias=_A , sample_size=_A , num_vector_embeds=_A , activation_fn=_A , num_embeds_ada_norm=_A , )
for _ in range(2)
])
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
SCREAMING_SNAKE_CASE_ : List[str] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
SCREAMING_SNAKE_CASE_ : Optional[Any] = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
SCREAMING_SNAKE_CASE_ : Dict = [1, 0]
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : int=None , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , lowercase_ : List[str] = True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = hidden_states
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
# attention_mask is not used yet
for i in range(2):
# for each of the two transformers, pass the corresponding condition tokens
SCREAMING_SNAKE_CASE_ : Union[str, Any] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
SCREAMING_SNAKE_CASE_ : Any = self.transformer_index_for_condition[i]
SCREAMING_SNAKE_CASE_ : Dict = self.transformers[transformer_index](
_A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , return_dict=_A , )[0]
encoded_states.append(encoded_state - input_states)
tokens_start += self.condition_lengths[i]
SCREAMING_SNAKE_CASE_ : Tuple = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
SCREAMING_SNAKE_CASE_ : List[Any] = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=_A)
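# The forward pass above blends two transformer outputs; stripped of the diffusers
# plumbing, its core is a convex combination of the residual updates plus the input
# (a sketch with made-up tensor shapes, not the module's actual API):
import torch

mix_ratio = 0.5
input_states = torch.randn(2, 77, 64)
delta_1 = torch.randn(2, 77, 64)  # transformer 1 output minus its input
delta_2 = torch.randn(2, 77, 64)  # transformer 2 output minus its input
output_states = (delta_1 * mix_ratio + delta_2 * (1 - mix_ratio)) + input_states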
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __lowercase ( ) -> List[str]:
__SCREAMING_SNAKE_CASE = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
__SCREAMING_SNAKE_CASE = Image.open(requests.get(a__ , stream=a__ ).raw ).convert('RGB' )
return image
def __lowercase ( a__ ) -> Dict:
__SCREAMING_SNAKE_CASE = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def __lowercase ( a__ , a__ , a__ ) -> int:
__SCREAMING_SNAKE_CASE = dct.pop(a__ )
__SCREAMING_SNAKE_CASE = val
def __lowercase ( a__ , a__ ) -> Optional[int]:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__SCREAMING_SNAKE_CASE = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" )
__SCREAMING_SNAKE_CASE = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
__SCREAMING_SNAKE_CASE = torch.cat((q_bias, torch.zeros_like(a__ , requires_grad=a__ ), v_bias) )
__SCREAMING_SNAKE_CASE = qkv_bias
def __lowercase ( a__ , a__ ) -> int:
__SCREAMING_SNAKE_CASE = 3_64 if 'coco' in model_name else 2_24
__SCREAMING_SNAKE_CASE = BlipaVisionConfig(image_size=a__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=a__ ).to_dict()
elif "opt-6.7b" in model_name:
__SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=a__ ).to_dict()
elif "t5-xl" in model_name:
__SCREAMING_SNAKE_CASE = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__SCREAMING_SNAKE_CASE = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
__SCREAMING_SNAKE_CASE = BlipaConfig(vision_config=a__ , text_config=a__ )
return config, image_size
@torch.no_grad()
def __lowercase ( a__ , a__=None , a__=False ) -> Any:
__SCREAMING_SNAKE_CASE = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
__SCREAMING_SNAKE_CASE = tokenizer('\n' , add_special_tokens=a__ ).input_ids[0]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_blipa_config(a__ , eos_token_id=a__ )
__SCREAMING_SNAKE_CASE = BlipaForConditionalGeneration(a__ ).eval()
__SCREAMING_SNAKE_CASE = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__SCREAMING_SNAKE_CASE = 'cuda' if torch.cuda.is_available() else 'cpu'
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = load_model_and_preprocess(
name=a__ , model_type=a__ , is_eval=a__ , device=a__ )
original_model.eval()
print('Done!' )
# update state dict keys
__SCREAMING_SNAKE_CASE = original_model.state_dict()
__SCREAMING_SNAKE_CASE = create_rename_keys(a__ )
for src, dest in rename_keys:
rename_key(a__ , a__ , a__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__SCREAMING_SNAKE_CASE = state_dict.pop(a__ )
if key.startswith('Qformer.bert' ):
__SCREAMING_SNAKE_CASE = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
__SCREAMING_SNAKE_CASE = key.replace('self' , 'attention' )
if "opt_proj" in key:
__SCREAMING_SNAKE_CASE = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
__SCREAMING_SNAKE_CASE = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
__SCREAMING_SNAKE_CASE = key.replace('opt' , 'language' )
if key.startswith('t5' ):
__SCREAMING_SNAKE_CASE = key.replace('t5' , 'language' )
__SCREAMING_SNAKE_CASE = val
# read in qv biases
read_in_q_v_bias(a__ , a__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hf_model.load_state_dict(a__ , strict=a__ )
assert len(a__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__SCREAMING_SNAKE_CASE = load_demo_image()
__SCREAMING_SNAKE_CASE = vis_processors['eval'](a__ ).unsqueeze(0 ).to(a__ )
__SCREAMING_SNAKE_CASE = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(a__ )
# create processor
__SCREAMING_SNAKE_CASE = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=a__ , image_std=a__ )
__SCREAMING_SNAKE_CASE = BlipaProcessor(image_processor=a__ , tokenizer=a__ )
__SCREAMING_SNAKE_CASE = processor(images=a__ , return_tensors='pt' ).pixel_values.to(a__ )
# make sure processor creates exact same pixel values
assert torch.allclose(a__ , a__ )
original_model.to(a__ )
hf_model.to(a__ )
with torch.no_grad():
if "opt" in model_name:
__SCREAMING_SNAKE_CASE = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
__SCREAMING_SNAKE_CASE = hf_model(a__ , a__ ).logits
else:
__SCREAMING_SNAKE_CASE = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
__SCREAMING_SNAKE_CASE = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__SCREAMING_SNAKE_CASE = hf_model(a__ , a__ , labels=a__ ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=a__ )
assert torch.allclose(logits[0, :3, :3] , a__ , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=a__ )
else:
# cast to same type
__SCREAMING_SNAKE_CASE = logits.dtype
assert torch.allclose(original_logits.to(a__ ) , a__ , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
__SCREAMING_SNAKE_CASE = ''
__SCREAMING_SNAKE_CASE = tokenizer(a__ , return_tensors='pt' ).input_ids.to(a__ )
__SCREAMING_SNAKE_CASE = original_model.generate({'image': original_pixel_values} )
__SCREAMING_SNAKE_CASE = hf_model.generate(
a__ , a__ , do_sample=a__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , a__ )
__SCREAMING_SNAKE_CASE = input_ids.shape[1]
__SCREAMING_SNAKE_CASE = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=a__ )
__SCREAMING_SNAKE_CASE = [text.strip() for text in output_text]
print('HF generation:' , a__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(a__ )
hf_model.save_pretrained(a__ )
if push_to_hub:
processor.push_to_hub(f"""nielsr/{model_name}""" )
hf_model.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase__ : Dict =argparse.ArgumentParser()
lowerCAmelCase__ : Union[str, Any] =[
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
lowerCAmelCase__ : int =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
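# The conversion script above largely boils down to rewriting checkpoint keys before
# load_state_dict. The generic remapping pattern it uses, reduced to its essentials:
import torch


def remap_state_dict(state_dict: dict, rename_pairs: list) -> dict:
    for src, dest in rename_pairs:
        # pop under the old name, re-insert under the new one
        state_dict[dest] = state_dict.pop(src)
    return state_dict


sd = {"visual_encoder.cls_token": torch.zeros(1)}
remap_state_dict(sd, [("visual_encoder.cls_token", "vision_model.embeddings.class_embedding")])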
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
def _snake_case ( self )-> Optional[int]:
lowerCamelCase_ ="ZinengTang/tvlt-base"
lowerCamelCase_ =tempfile.mkdtemp()
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )-> List[str]:
return TvltImageProcessor.from_pretrained(self.checkpoint , **snake_case__ )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )-> Dict:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **snake_case__ )
def _snake_case ( self )-> List[Any]:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self )-> int:
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =TvltProcessor(image_processor=snake_case__ , feature_extractor=snake_case__ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ =TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , snake_case__ )
self.assertIsInstance(processor.image_processor , snake_case__ )
def _snake_case ( self )-> Optional[Any]:
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =TvltProcessor(image_processor=snake_case__ , feature_extractor=snake_case__ )
lowerCamelCase_ =np.ones([1_2000] )
lowerCamelCase_ =feature_extractor(snake_case__ , return_tensors="""np""" )
lowerCamelCase_ =processor(audio=snake_case__ , return_tensors="""np""" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _snake_case ( self )-> List[Any]:
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =TvltProcessor(image_processor=snake_case__ , feature_extractor=snake_case__ )
lowerCamelCase_ =np.ones([3, 224, 224] )
lowerCamelCase_ =image_processor(snake_case__ , return_tensors="""np""" )
lowerCamelCase_ =processor(images=snake_case__ , return_tensors="""np""" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _snake_case ( self )-> List[str]:
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =TvltProcessor(image_processor=snake_case__ , feature_extractor=snake_case__ )
lowerCamelCase_ =np.ones([1_2000] )
lowerCamelCase_ =np.ones([3, 224, 224] )
lowerCamelCase_ =processor(audio=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["""audio_values""", """audio_mask""", """pixel_values""", """pixel_mask"""] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =TvltProcessor(image_processor=snake_case__ , feature_extractor=snake_case__ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="""`processor` and `image_processor`+`feature_extractor` model input names do not match""" , )
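# The tests above check that the combined processor is a thin router: `audio=` goes to the
# feature extractor, `images=` to the image processor, and calling it with neither raises.
# A minimal stand-in illustrating that contract (not the real TvltProcessor; the error
# message is a placeholder):
class CombinedProcessor:
    def __init__(self, image_processor, feature_extractor):
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, audio=None, images=None, **kwargs):
        if audio is None and images is None:
            raise ValueError("You need to specify either an `audio` or `images` input to process.")
        out = {}
        if audio is not None:
            out.update(self.feature_extractor(audio, **kwargs))
        if images is not None:
            out.update(self.image_processor(images, **kwargs))
        return out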
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> int:
lowerCamelCase_ =jnp.ones((batch_size, length) ) / length
return scores
def _snake_case ( self )-> Dict:
lowerCamelCase_ =None
lowerCamelCase_ =20
lowerCamelCase_ =self._get_uniform_logits(batch_size=2 , length=_SCREAMING_SNAKE_CASE )
# tweak scores to not be uniform anymore
lowerCamelCase_ =scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ =scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ =jax.nn.softmax(_SCREAMING_SNAKE_CASE , axis=-1 )
lowerCamelCase_ =FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ =FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ =jax.nn.softmax(temp_dist_warper_sharper(_SCREAMING_SNAKE_CASE , scores.copy() , cur_len=_SCREAMING_SNAKE_CASE ) , axis=-1 )
lowerCamelCase_ =jax.nn.softmax(temp_dist_warper_smoother(_SCREAMING_SNAKE_CASE , scores.copy() , cur_len=_SCREAMING_SNAKE_CASE ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _snake_case ( self )-> Any:
lowerCamelCase_ =None
lowerCamelCase_ =10
lowerCamelCase_ =2
# create ramp distribution
lowerCamelCase_ =np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ =ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ =FlaxTopKLogitsWarper(3 )
lowerCamelCase_ =top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ =5
lowerCamelCase_ =FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ =np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ =top_k_warp_safety_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _snake_case ( self )-> Optional[int]:
lowerCamelCase_ =None
lowerCamelCase_ =10
lowerCamelCase_ =2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ =np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ =FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ =np.exp(top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ =np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ =np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ =ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ =FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ =top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _snake_case ( self )-> List[Any]:
lowerCamelCase_ =20
lowerCamelCase_ =4
lowerCamelCase_ =0
lowerCamelCase_ =FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
# check that min length is applied at length 5
lowerCamelCase_ =ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ =5
lowerCamelCase_ =self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =min_dist_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ =self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =15
lowerCamelCase_ =min_dist_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def _snake_case ( self )-> Optional[Any]:
lowerCamelCase_ =20
lowerCamelCase_ =4
lowerCamelCase_ =0
lowerCamelCase_ =FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ =ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ =1
lowerCamelCase_ =self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ =3
lowerCamelCase_ =self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def _snake_case ( self )-> int:
lowerCamelCase_ =20
lowerCamelCase_ =4
lowerCamelCase_ =0
lowerCamelCase_ =5
lowerCamelCase_ =FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ =ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ =4
lowerCamelCase_ =self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ =3
lowerCamelCase_ =self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def _snake_case ( self )-> Dict:
lowerCamelCase_ =4
lowerCamelCase_ =10
lowerCamelCase_ =15
lowerCamelCase_ =2
lowerCamelCase_ =1
lowerCamelCase_ =15
# dummy input_ids and scores
lowerCamelCase_ =ids_tensor((batch_size, sequence_length) , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =input_ids.copy()
lowerCamelCase_ =self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =scores.copy()
# instantiate all dist processors
lowerCamelCase_ =FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ =FlaxTopKLogitsWarper(3 )
lowerCamelCase_ =FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ =FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =10
# no processor list
lowerCamelCase_ =temp_dist_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =min_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =bos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =eos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# with processor list
lowerCamelCase_ =FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ =processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _snake_case ( self )-> List[str]:
lowerCamelCase_ =4
lowerCamelCase_ =10
lowerCamelCase_ =15
lowerCamelCase_ =2
lowerCamelCase_ =1
lowerCamelCase_ =15
# dummy input_ids and scores
lowerCamelCase_ =ids_tensor((batch_size, sequence_length) , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =input_ids.copy()
lowerCamelCase_ =self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =scores.copy()
# instantiate all dist processors
lowerCamelCase_ =FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ =FlaxTopKLogitsWarper(3 )
lowerCamelCase_ =FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ =FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =10
# no processor list
def run_no_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =temp_dist_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =min_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =bos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =eos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
return scores
# with processor list
def run_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ =processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
return scores
lowerCamelCase_ =jax.jit(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =jax.jit(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =jitted_run_no_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =jitted_run_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
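# What the temperature test above verifies, in plain numpy: dividing logits by T < 1
# sharpens the softmax (peaks rise, valleys fall) and T > 1 smooths it. A tiny check:
import numpy as np


def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()


logits = np.array([2.0, 1.0, 0.5])
p = softmax(logits)
p_sharp = softmax(logits / 0.5)  # temperature 0.5
p_smooth = softmax(logits / 1.3)  # temperature 1.3
assert p_sharp.max() > p.max() > p_smooth.max()
assert p_sharp.min() < p.min() < p_smooth.min()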
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(lowercase ):
return ext
raise Exception(
f"""Unable to determine file format from file extension {path}. """
f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase_ = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
lowerCamelCase_ = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format
lowerCamelCase_ = PipelineDataFormat.from_str(
format=lowercase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(lowercase , lowercase )
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : Tuple , A_ : Pipeline , A_ : PipelineDataFormat ) -> str:
"""simple docstring"""
lowerCamelCase_ = nlp
lowerCamelCase_ = reader
@staticmethod
def a__ ( A_ : ArgumentParser ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = parser.add_parser('run' , help='Run a pipeline through the CLI' )
run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run' )
run_parser.add_argument('--input' , type=A_ , help='Path to the file to use for inference' )
run_parser.add_argument('--output' , type=A_ , help='Path to the file that will be used post to write results.' )
run_parser.add_argument('--model' , type=A_ , help='Name or path to the model to instantiate.' )
run_parser.add_argument('--config' , type=A_ , help='Name or path to the model\'s config to instantiate.' )
run_parser.add_argument(
'--tokenizer' , type=A_ , help='Name of the tokenizer to use. (default: same as the model name)' )
run_parser.add_argument(
'--column' , type=A_ , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , )
run_parser.add_argument(
'--format' , type=A_ , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , )
run_parser.add_argument(
'--device' , type=A_ , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.' )
run_parser.set_defaults(func=A_ )
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self._nlp, []
for entry in self._reader:
lowerCamelCase_ = nlp(**A_ ) if self._reader.is_multi_columns else nlp(A_ )
if isinstance(A_ , A_ ):
outputs.append(A_ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
lowerCamelCase_ = self._reader.save_binary(A_ )
logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(A_ )
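# Example invocation of the `run` subcommand registered above, using only the flags it
# defines (the binary name, task name and file names are assumptions, not taken from this
# file):
#
#     transformers-cli run --task text-classification \
#         --input reviews.csv --column text \
#         --output predictions.json --format json --overwrite
#
# `--format infer` (the default) picks the data format from the input file extension via
# try_infer_format_from_ext.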
from collections.abc import Callable
import numpy as np
def heun_method(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor step: forward Euler estimate of y at x + h
        y_pred = y[k] + step_size * ode_func(x, y[k])
        # corrector step: trapezoidal average of the slopes at both ends
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_pred)
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
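# A quick accuracy check for heun_method above on y' = y, y(0) = 1, whose exact solution
# is e^x; the corrector makes the scheme second order, so step 0.1 already lands close:
#
#     y = heun_method(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
#     print(y[-1])   # ~2.7141, vs e ~ 2.71828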
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
    "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
    "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
    "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
    "funnel-transformer/intermediate": (
        "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
    ),
    "funnel-transformer/intermediate-base": (
        "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
    ),
    "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
    "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
    "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
    "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
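# With the defaults above, the two derived properties behave as follows:
#
#     config = FunnelConfig()        # block_sizes=[4, 4, 4]
#     config.num_hidden_layers       # 12 == sum(config.block_sizes)
#     config.num_blocks              # 3
#     config.num_hidden_layers = 6   # raises NotImplementedError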
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve for whichever of force, area or distance is passed as 0 in F = (π² ħ c A) / (240 d⁴)."""
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
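# Example calls for casimir_force above (plate area and separation are placeholder values):
#
#     casimir_force(force=0, area=4e-4, distance=1e-6)   # -> {"force": <solved magnitude>}
#     casimir_force(force=0, area=0, distance=1e-6)      # ValueError: two arguments are 0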
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    """Raise base to the power of exponent using recursion."""
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
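# The driver above patches up negative exponents outside the recursion; a couple of
# concrete values for the function as written:
#
#     power(2, 10)        # 1024
#     power(5, 0)         # 1 (base case: exponent 0)
#     1 / power(2, 3)     # 0.125, i.e. 2 ** -3, as computed in __main__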
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split text into sentences with NLTK and re-join them newline-separated."""
    x = re.sub('''<n>''', '''''', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
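# Example of the splitter above (the punkt data is downloaded at import time):
#
#     add_newline_to_end_of_each_sentence("Hello there. How are you?")
#     # -> "Hello there.\nHow are you?"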
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # exchange the payloads; the nodes themselves stay in place
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print('After swapping')
    ll.print_list()
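# Note on the swap above: swap_nodes exchanges node payloads rather than relinking nodes,
# so it is O(n) to locate the two values and O(1) to swap; a value that is absent leaves
# the list untouched thanks to the early return:
#
#     ll.swap_nodes(2, 99)   # 99 not in the list -> no-op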
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def _lowercase (self : Dict , __a : Any ):
if isinstance(__a , __a ):
UpperCAmelCase_ = [label.strip() for label in labels.split("," ) if label.strip()]
return labels
def __call__(self : Union[str, Any] , __a : Optional[int] , __a : Optional[int] , __a : int ):
if len(__a ) == 0 or len(__a ) == 0:
raise ValueError("You must include at least one label and at least one sequence." )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(__a ) )
if isinstance(__a , __a ):
UpperCAmelCase_ = [sequences]
UpperCAmelCase_ = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__a )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(UpperCamelCase__ )
class __A ( UpperCamelCase__ ):
def __init__(self : Optional[Any] , __a : Union[str, Any]=ZeroShotClassificationArgumentHandler() , *__a : Optional[int] , **__a : List[str] ):
UpperCAmelCase_ = args_parser
super().__init__(*__a , **__a )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
def _lowercase (self : str ):
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("entail" ):
return ind
return -1
def _lowercase (self : Any , __a : Any , __a : int=True , __a : Dict=True , __a : Any=TruncationStrategy.ONLY_FIRST , **__a : Tuple ):
UpperCAmelCase_ = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`" )
UpperCAmelCase_ = self.tokenizer.eos_token
try:
UpperCAmelCase_ = self.tokenizer(
__a , add_special_tokens=__a , return_tensors=__a , padding=__a , truncation=__a , )
except Exception as e:
if "too short" in str(__a ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
UpperCAmelCase_ = self.tokenizer(
__a , add_special_tokens=__a , return_tensors=__a , padding=__a , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _lowercase (self : List[str] , **__a : Tuple ):
if kwargs.get("multi_class" , __a ) is not None:
UpperCAmelCase_ = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers." )
UpperCAmelCase_ = {}
if "candidate_labels" in kwargs:
UpperCAmelCase_ = self._args_parser._parse_labels(kwargs["candidate_labels"] )
if "hypothesis_template" in kwargs:
UpperCAmelCase_ = kwargs["hypothesis_template"]
UpperCAmelCase_ = {}
if "multi_label" in kwargs:
UpperCAmelCase_ = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__(self : Tuple , __a : Union[str, List[str]] , *__a : Optional[Any] , **__a : Tuple , ):
if len(__a ) == 0:
pass
elif len(__a ) == 1 and "candidate_labels" not in kwargs:
UpperCAmelCase_ = args[0]
else:
raise ValueError(f"""Unable to understand extra arguments {args}""" )
return super().__call__(__a , **__a )
def _lowercase (self : Optional[int] , __a : Optional[Any] , __a : List[str]=None , __a : Any="This example is {}." ):
UpperCAmelCase_ , UpperCAmelCase_ = self._args_parser(__a , __a , __a )
for i, (candidate_label, sequence_pair) in enumerate(zip(__a , __a ) ):
UpperCAmelCase_ = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__a ) - 1,
**model_input,
}
def _lowercase (self : List[str] , __a : Any ):
UpperCAmelCase_ = inputs["candidate_label"]
UpperCAmelCase_ = inputs["sequence"]
UpperCAmelCase_ = {k: inputs[k] for k in self.tokenizer.model_input_names}
UpperCAmelCase_ = self.model(**__a )
UpperCAmelCase_ = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def _lowercase (self : Optional[Any] , __a : List[str] , __a : Tuple=False ):
UpperCAmelCase_ = [outputs["candidate_label"] for outputs in model_outputs]
UpperCAmelCase_ = [outputs["sequence"] for outputs in model_outputs]
UpperCAmelCase_ = np.concatenate([output["logits"].numpy() for output in model_outputs] )
UpperCAmelCase_ = logits.shape[0]
UpperCAmelCase_ = len(__a )
UpperCAmelCase_ = N // n
UpperCAmelCase_ = logits.reshape((num_sequences, n, -1) )
if multi_label or len(__a ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCAmelCase_ = self.entailment_id
UpperCAmelCase_ = -1 if entailment_id == 0 else 0
UpperCAmelCase_ = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCAmelCase_ = np.exp(__a ) / np.exp(__a ).sum(-1 , keepdims=__a )
UpperCAmelCase_ = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCAmelCase_ = reshaped_outputs[..., self.entailment_id]
UpperCAmelCase_ = np.exp(__a ) / np.exp(__a ).sum(-1 , keepdims=__a )
UpperCAmelCase_ = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
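# The postprocessing above turns NLI logits into label scores two ways. In multi-label
# mode each label is scored independently by softmaxing [contradiction, entailment]; in
# single-label mode the entailment logits are softmaxed across labels. In plain numpy
# (assuming the common [contradiction, neutral, entailment] ordering):
import numpy as np

logits = np.array([[1.0, 0.2, 2.0],   # one row per candidate label
                   [1.5, 0.1, 0.3]])
# multi-label: per-label softmax over (contradiction, entailment), keep the entailment column
pair = logits[:, [0, 2]]
multi = np.exp(pair) / np.exp(pair).sum(-1, keepdims=True)
multi_scores = multi[:, 1]
# single-label: softmax of the entailment logits across all candidate labels
ent = logits[:, 2]
single_scores = np.exp(ent) / np.exp(ent).sum()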
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_SCREAMING_SNAKE_CASE = parser.parse_args()
if args.model_type == "roberta":
_SCREAMING_SNAKE_CASE = RobertaForMaskedLM.from_pretrained(args.model_name)
_SCREAMING_SNAKE_CASE = """roberta"""
elif args.model_type == "gpt2":
_SCREAMING_SNAKE_CASE = GPTaLMHeadModel.from_pretrained(args.model_name)
_SCREAMING_SNAKE_CASE = """transformer"""
_SCREAMING_SNAKE_CASE = model.state_dict()
_SCREAMING_SNAKE_CASE = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_SCREAMING_SNAKE_CASE = state_dict[F'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_SCREAMING_SNAKE_CASE = F'''{prefix}.embeddings.{w}.weight'''
_SCREAMING_SNAKE_CASE = state_dict[param_name]
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE = F'''{prefix}.embeddings.LayerNorm.{w}'''
_SCREAMING_SNAKE_CASE = state_dict[param_name]
# Transformer Blocks #
_SCREAMING_SNAKE_CASE = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE = state_dict[
F'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
_SCREAMING_SNAKE_CASE = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_SCREAMING_SNAKE_CASE = state_dict[F'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE = state_dict[F'''lm_head.dense.{w}''']
_SCREAMING_SNAKE_CASE = state_dict[F'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE = state_dict[F'''{prefix}.ln_f.{w}''']
_SCREAMING_SNAKE_CASE = state_dict["""lm_head.weight"""]
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
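# The extraction above keeps teacher layers [0, 2, 4, 7, 9, 11] and renumbers them 0..5
# for the student (std_idx increments once per kept layer); the renumbering in isolation:
teacher_layers = [0, 2, 4, 7, 9, 11]
layer_map = {teacher_idx: student_idx for student_idx, teacher_idx in enumerate(teacher_layers)}
# e.g. a teacher key ending in "encoder.layer.7...." lands at student index layer_map[7] == 3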
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE_ :
def __init__( self : List[Any] , _A : Optional[Any] , _A : Dict=13 , _A : Union[str, Any]=30 , _A : Tuple=2 , _A : Union[str, Any]=3 , _A : Optional[int]=True , _A : Optional[Any]=True , _A : str=32 , _A : int=2 , _A : List[str]=4 , _A : List[str]=37 , _A : Tuple="gelu" , _A : Dict=0.1 , _A : Optional[Any]=0.1 , _A : Optional[int]=10 , _A : Optional[int]=0.0_2 , _A : Optional[Any]=3 , _A : str=0.6 , _A : Union[str, Any]=None , ) -> Any:
"""simple docstring"""
snake_case_ : Optional[int] = parent
snake_case_ : Tuple = batch_size
snake_case_ : List[Any] = image_size
snake_case_ : List[str] = patch_size
snake_case_ : List[str] = num_channels
snake_case_ : Optional[Any] = is_training
snake_case_ : Any = use_labels
snake_case_ : Tuple = hidden_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : List[Any] = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : Tuple = type_sequence_label_size
snake_case_ : List[str] = initializer_range
snake_case_ : Optional[Any] = mask_ratio
snake_case_ : Any = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
snake_case_ : Optional[int] = (image_size // patch_size) ** 2
snake_case_ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
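        # Worked example (hypothetical numbers): with the defaults above, image_size=30 and
        # patch_size=2 give num_patches = (30 // 2) ** 2 = 225; with mask_ratio=0.6 the
        # encoder then sees ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 tokens.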
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Union[str, Any] = None
if self.use_labels:
snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCAmelCase_ ( self : List[Any] , _A : int , _A : Dict , _A : str ) -> Dict:
"""simple docstring"""
snake_case_ : Union[str, Any] = TFViTMAEModel(config=_A )
snake_case_ : str = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Dict , _A : Dict , _A : Any , _A : List[Any] ) -> int:
"""simple docstring"""
snake_case_ : Any = TFViTMAEForPreTraining(_A )
snake_case_ : Optional[Any] = model(_A , training=_A )
# expected sequence length = num_patches
snake_case_ : List[str] = (self.image_size // self.patch_size) ** 2
snake_case_ : Optional[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
snake_case_ : str = 1
snake_case_ : Dict = TFViTMAEForPreTraining(_A )
snake_case_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ : List[str] = model(_A , training=_A )
snake_case_ : Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
snake_case_ : List[Any] = self.prepare_config_and_inputs()
((snake_case_) ,(snake_case_) ,(snake_case_)) : Any = config_and_inputs
snake_case_ : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , unittest.TestCase ):
__magic_name__: List[str] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__magic_name__: str = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
__magic_name__: Dict = False
__magic_name__: Dict = False
__magic_name__: List[Any] = False
__magic_name__: Dict = False
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
"""simple docstring"""
snake_case_ : List[Any] = TFViTMAEModelTester(self )
snake_case_ : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ ,snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[Any] = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
"""simple docstring"""
snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[str] = model_class(_A )
snake_case_ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Dict = [*signature.parameters.keys()]
snake_case_ : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
"""simple docstring"""
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_A )
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = model_class(_A )
snake_case_ : Union[str, Any] = self._prepare_for_class(_A , _A )
snake_case_ : List[str] = model(_A , noise=_A )
snake_case_ : Tuple = copy.deepcopy(self._prepare_for_class(_A , _A ) )
snake_case_ : str = model(**_A , noise=_A )
snake_case_ : Union[str, Any] = outputs_dict[0].numpy()
snake_case_ : Optional[Any] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Tuple = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_A : int ):
snake_case_ : Any = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_A ):
snake_case_ : str = v.numpy()
else:
snake_case_ : Optional[Any] = np.array(_A )
return inputs_np_dict
for model_class in self.all_model_classes:
snake_case_ : int = model_class(_A )
snake_case_ : List[Any] = self._prepare_for_class(_A , _A )
snake_case_ : Any = prepare_numpy_arrays(_A )
snake_case_ : List[Any] = model(_A , noise=_A )
snake_case_ : List[Any] = model(**_A , noise=_A )
self.assert_outputs_same(_A , _A )
def UpperCAmelCase_ ( self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] ) -> List[str]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ : Optional[int] = tf.constant(_A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case_ : Optional[Any] = tf_noise
super().check_pt_tf_models(_A , _A , _A )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_A )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(_A , _A ),)
if isinstance(_A , _A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_A , '_keras_serializable' , _A )
}
snake_case_ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ : Optional[int] = tf.convert_to_tensor(_A )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
snake_case_ : Optional[Any] = main_layer_class(_A )
snake_case_ : List[str] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
snake_case_ : Union[str, Any] = tf.keras.Model(_A , outputs=main_layer(_A ) )
snake_case_ : int = model(_A )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : List[Any] = os.path.join(_A , 'keras_model.h5' )
model.save(_A )
snake_case_ : str = tf.keras.models.load_model(
_A , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_A , tf.keras.Model )
snake_case_ : List[str] = model(_A )
self.assert_outputs_same(_A , _A )
@slow
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : int = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = model_class(_A )
snake_case_ : Optional[Any] = self._prepare_for_class(_A , _A )
snake_case_ : int = model(_A , noise=_A )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ : Any = outputs.last_hidden_state.numpy()
snake_case_ : Optional[int] = 0
else:
snake_case_ : str = outputs.logits.numpy()
snake_case_ : Optional[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A , saved_model=_A )
snake_case_ : Any = model_class.from_pretrained(_A )
snake_case_ : Any = model(_A , noise=_A )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ : Dict = after_outputs['last_hidden_state'].numpy()
snake_case_ : Dict = 0
else:
snake_case_ : Any = after_outputs['logits'].numpy()
snake_case_ : Optional[Any] = 0
snake_case_ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-5 )
def UpperCAmelCase_ ( self : Any ) -> str:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : str = model_class(_A )
snake_case_ : int = self._prepare_for_class(_A , _A )
snake_case_ : str = model(_A , noise=_A )
snake_case_ : Dict = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_A )
snake_case_ : Any = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
snake_case_ : str = model_class.from_config(model.config )
snake_case_ : Union[str, Any] = new_model(_A ) # Build model
new_model.set_weights(model.get_weights() )
snake_case_ : List[str] = new_model(_A , noise=_A )
self.assert_outputs_same(_A , _A )
    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(_A )
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase_ ( self : str ) -> Dict:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self : str ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
snake_case_ : List[str] = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
snake_case_ : List[Any] = self.default_image_processor
snake_case_ : Dict = prepare_img()
snake_case_ : Optional[Any] = image_processor(images=_A , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
snake_case_ : int = ViTMAEConfig()
snake_case_ : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
snake_case_ : List[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
snake_case_ : Optional[Any] = model(**_A , noise=_A )
# verify the logits
snake_case_ : Optional[int] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , _A )
snake_case_ : Any = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , _A , atol=1E-4 )
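# Illustration (not part of the tests above): ViTMAE decides which patches to mask by
# argsorting a per-patch noise vector, so passing an explicit `noise` array, as the
# tests above do, makes the otherwise random masking deterministic. A sketch assuming
# ViT-base-style geometry (224px images, 16px patches):
np.random.seed(2)
toy_num_patches = (224 // 16) ** 2  # 196
toy_noise = np.random.uniform(size=(1, toy_num_patches))
toy_ids_shuffle = np.argsort(toy_noise, axis=1)  # patches late in this order get masked
assert toy_ids_shuffle.shape == (1, 196)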
| 327 | 1 |
def lowerCamelCase ( UpperCAmelCase__ : list ) -> list:
if len(UpperCAmelCase__ ) < 2:
return collection
def circle_sort_util(UpperCAmelCase__ : list , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> bool:
lowercase_ : str = False
if low == high:
return swapped
lowercase_ : str = low
lowercase_ : Optional[Any] = high
while left < right:
if collection[left] > collection[right]:
lowercase_ : str = (
collection[right],
collection[left],
)
lowercase_ : Optional[int] = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
lowercase_ : List[Any] = (
collection[right + 1],
collection[left],
)
lowercase_ : Optional[Any] = True
lowercase_ : Dict = low + int((high - low) / 2 )
lowercase_ : List[str] = circle_sort_util(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = circle_sort_util(UpperCAmelCase__ , mid + 1 , UpperCAmelCase__ )
return swapped or left_swap or right_swap
lowercase_ : List[Any] = True
while is_not_sorted is True:
lowercase_ : Tuple = circle_sort_util(UpperCAmelCase__ , 0 , len(UpperCAmelCase__ ) - 1 )
return collection
if __name__ == "__main__":
_lowercase : Dict = input("Enter numbers separated by a comma:\n").strip()
_lowercase : int = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
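# Worked example (illustrative): circle_sort([4, 1, 3, 2]) first compares the mirrored
# pairs (4, 2) and (1, 3); only (4, 2) swaps, giving [2, 1, 3, 4]. The recursive calls
# then sort each half ([2, 1] becomes [1, 2]), and a final pass with no swaps confirms
# the sorted result [1, 2, 3, 4].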
| 360 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> List[str]:
if isinstance(UpperCAmelCase__ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class __magic_name__ :
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any , lowercase_ : str ):
pass
def SCREAMING_SNAKE_CASE_ ( self : str ):
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float ):
lowercase_ : Optional[Any] = np.abs((a - b) ).max()
self.assertLessEqual(lowercase_ , lowercase_ , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Tuple=None , **lowercase_ : Optional[int] ):
lowercase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
lowercase_ : Any = FlaxVisionTextDualEncoderModel(lowercase_ )
lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[Any]=None , **lowercase_ : Tuple ):
lowercase_ , lowercase_ : Any = self.get_vision_text_model(lowercase_ , lowercase_ )
lowercase_ : Optional[int] = {"""vision_model""": vision_model, """text_model""": text_model}
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=None , **lowercase_ : int ):
lowercase_ , lowercase_ : Union[str, Any] = self.get_vision_text_model(lowercase_ , lowercase_ )
lowercase_ : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
lowercase_ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
lowercase_ : Tuple = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
lowercase_ : Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase_ )
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ )
lowercase_ : List[str] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
lowercase_ : Union[str, Any] = after_output[0]
lowercase_ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase_ , 1E-3 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Dict=None , **lowercase_ : Optional[Any] ):
lowercase_ , lowercase_ : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ )
lowercase_ : Dict = {"""vision_model""": vision_model, """text_model""": text_model}
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
lowercase_ : Optional[int] = model(
input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ )
lowercase_ : Tuple = output.vision_model_output.attentions
self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase_ : List[str] = to_atuple(vision_model.config.image_size )
lowercase_ : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowercase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowercase_ : Optional[Any] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowercase_ : Union[str, Any] = output.text_model_output.attentions
self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int ):
pt_model.to(lowercase_ )
pt_model.eval()
# prepare inputs
lowercase_ : int = inputs_dict
lowercase_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowercase_ : str = pt_model(**lowercase_ ).to_tuple()
lowercase_ : Optional[Any] = fx_model(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowercase_ )
lowercase_ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ , from_pt=lowercase_ )
lowercase_ : Dict = fx_model_loaded(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowercase_ )
lowercase_ : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(lowercase_ , from_flax=lowercase_ )
pt_model_loaded.to(lowercase_ )
pt_model_loaded.eval()
with torch.no_grad():
lowercase_ : List[Any] = pt_model_loaded(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowercase_ , pt_output_loaded.numpy() , 4E-2 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Union[str, Any] ):
lowercase_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
lowercase_ : List[Any] = VisionTextDualEncoderModel(lowercase_ )
lowercase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(lowercase_ )
lowercase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ )
lowercase_ : Tuple = fx_state
self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] ):
lowercase_ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
lowercase_ : int = VisionTextDualEncoderModel(lowercase_ )
lowercase_ : Dict = FlaxVisionTextDualEncoderModel(lowercase_ )
lowercase_ : Optional[Any] = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params )
self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Tuple = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = self.prepare_config_and_inputs()
self.check_save_load(**lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowercase_ )
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Tuple = self.prepare_config_and_inputs()
lowercase_ : List[Any] = config_inputs_dict.pop("""vision_config""" )
lowercase_ : int = config_inputs_dict.pop("""text_config""" )
lowercase_ : Optional[int] = config_inputs_dict
self.check_equivalence_pt_to_flax(lowercase_ , lowercase_ , lowercase_ )
self.check_equivalence_flax_to_pt(lowercase_ , lowercase_ , lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ , lowercase_ : str = self.get_pretrained_model_and_inputs()
lowercase_ : Dict = model_a(**lowercase_ )
lowercase_ : str = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowercase_ )
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ )
lowercase_ : str = model_a(**lowercase_ )
lowercase_ : Union[str, Any] = after_outputs[0]
lowercase_ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase_ , 1E-5 )
@require_flax
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , )
lowercase_ : List[str] = 13
lowercase_ : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase_ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowercase_ : str = random_attention_mask([batch_size, 4] )
lowercase_ : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple ):
lowercase_ : Union[str, Any] = FlaxViTModel(lowercase_ )
lowercase_ : Dict = FlaxBertModel(lowercase_ )
return vision_model, text_model
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Any = FlaxViTModelTester(self )
lowercase_ : Optional[Any] = FlaxBertModelTester(self )
lowercase_ : Dict = vit_model_tester.prepare_config_and_inputs()
lowercase_ : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
lowercase_ , lowercase_ : List[str] = vision_config_and_inputs
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , )
lowercase_ : List[str] = 13
lowercase_ : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase_ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowercase_ : Tuple = random_attention_mask([batch_size, 4] )
lowercase_ : Union[str, Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ):
lowercase_ : Tuple = FlaxCLIPVisionModel(lowercase_ )
lowercase_ : Any = FlaxBertModel(lowercase_ )
return vision_model, text_model
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Union[str, Any] = FlaxCLIPVisionModelTester(self )
lowercase_ : Tuple = FlaxBertModelTester(self )
lowercase_ : Union[str, Any] = clip_model_tester.prepare_config_and_inputs()
lowercase_ : Any = bert_model_tester.prepare_config_and_inputs()
lowercase_ , lowercase_ : Optional[Any] = vision_config_and_inputs
lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
lowercase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowercase_ : Optional[int] = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowercase_ , padding=lowercase_ , return_tensors="""np""" )
lowercase_ : List[str] = model(**lowercase_ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowercase_ : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowercase_ , atol=1E-3 ) )
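# Usage sketch (illustrative; not part of the tests above): from_vision_text_pretrained
# is the documented transformers entry point these tests exercise; it grafts a
# pretrained vision tower onto a pretrained text tower. Running this downloads both
# checkpoints, so it is guarded behind __main__:
if __name__ == "__main__":
    from transformers import FlaxVisionTextDualEncoderModel

    model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
        "google/vit-base-patch16-224", "bert-base-uncased"
    )
    print(type(model).__name__)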
| 21 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = """xlm-roberta-xl"""
def __init__( self : List[str] , UpperCAmelCase : Dict=250880 , UpperCAmelCase : int=2560 , UpperCAmelCase : Tuple=36 , UpperCAmelCase : Dict=32 , UpperCAmelCase : Dict=10240 , UpperCAmelCase : List[Any]="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : int=514 , UpperCAmelCase : Any=1 , UpperCAmelCase : Dict=0.0_2 , UpperCAmelCase : Union[str, Any]=1e-05 , UpperCAmelCase : List[Any]=1 , UpperCAmelCase : Any=0 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : List[Any]="absolute" , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[Any]=None , **UpperCAmelCase : Any , ) -> List[str]:
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase__ : Dict = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Dict = intermediate_size
lowerCamelCase__ : Any = hidden_dropout_prob
lowerCamelCase__ : List[str] = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : Optional[Any] = initializer_range
lowerCamelCase__ : List[str] = layer_norm_eps
lowerCamelCase__ : Any = position_embedding_type
lowerCamelCase__ : int = use_cache
lowerCamelCase__ : int = classifier_dropout
class lowerCAmelCase ( __UpperCamelCase ):
@property
def A_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCamelCase__ : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase__ : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
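# Usage sketch (illustrative; not part of the module above): the configuration class
# here mirrors transformers' XLMRobertaXLConfig, so a small variant for experimentation
# can be built the usual way:
if __name__ == "__main__":
    from transformers import XLMRobertaXLConfig

    toy_config = XLMRobertaXLConfig(num_hidden_layers=4, hidden_size=256, num_attention_heads=4)
    print(toy_config.model_type)  # "xlm-roberta-xl"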
| 50 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase__ : List[str] = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
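# Illustration (a toy sketch, not the transformers implementation): the _LazyModule
# pattern above defers heavy imports until an attribute is first accessed. A minimal
# stand-in showing the idea:
import importlib
import types


class ToyLazyModule(types.ModuleType):
    """Resolves an attribute by importing its owning submodule on first access."""

    def __init__(self, name: str, attr_to_submodule: dict):
        super().__init__(name)
        self._attr_to_submodule = attr_to_submodule

    def __getattr__(self, attr: str):
        mapping = self.__dict__.get('_attr_to_submodule', {})
        if attr not in mapping:
            raise AttributeError(attr)
        submodule = importlib.import_module('.' + mapping[attr], self.__name__)
        return getattr(submodule, attr)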
| 143 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__A = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
lowercase__: Tuple = state_dict.pop(__UpperCAmelCase )
lowercase__: Any = val
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[int]:
lowercase__: List[Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowercase__: Optional[int] = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
lowercase__: List[Any] = value
else:
lowercase__: List[str] = value
return new_state_dict
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=False ) -> Tuple:
lowercase__: Any = ''''''
if is_panoptic:
lowercase__: Union[str, Any] = '''conditional_detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowercase__: Dict = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
lowercase__: Optional[int] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase__: List[Any] = in_proj_weight[:2_5_6, :]
lowercase__: List[str] = in_proj_bias[:2_5_6]
lowercase__: Dict = in_proj_weight[2_5_6:5_1_2, :]
lowercase__: Union[str, Any] = in_proj_bias[2_5_6:5_1_2]
lowercase__: Dict = in_proj_weight[-2_5_6:, :]
lowercase__: List[Any] = in_proj_bias[-2_5_6:]
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
lowercase__: Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__: Optional[int] = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
lowercase__: Tuple = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
lowercase__: str = '''resnet101'''
if "dc5" in model_name:
lowercase__: Dict = True
lowercase__: Dict = '''panoptic''' in model_name
if is_panoptic:
lowercase__: Optional[int] = 2_5_0
else:
lowercase__: int = 9_1
lowercase__: Union[str, Any] = '''huggingface/label-files'''
lowercase__: int = '''coco-detection-id2label.json'''
lowercase__: str = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__: Any = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
lowercase__: List[str] = idalabel
lowercase__: Optional[int] = {v: k for k, v in idalabel.items()}
# load image processor
lowercase__: str = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
lowercase__: Any = ConditionalDetrImageProcessor(format=__UpperCAmelCase )
# prepare image
lowercase__: List[Any] = prepare_img()
lowercase__: Optional[int] = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' )
lowercase__: Tuple = encoding['''pixel_values''']
logger.info(F"""Converting model {model_name}...""" )
# load original model from torch hub
lowercase__: Dict = torch.hub.load('''DeppMeng/ConditionalDETR''' , __UpperCAmelCase , pretrained=__UpperCAmelCase ).eval()
lowercase__: Optional[int] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
lowercase__: int = '''conditional_detr.''' + src
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowercase__: Optional[Any] = rename_backbone_keys(__UpperCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(__UpperCAmelCase , is_panoptic=__UpperCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowercase__: Dict = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
lowercase__: str = state_dict.pop(__UpperCAmelCase )
lowercase__: Union[str, Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowercase__: str = state_dict.pop(__UpperCAmelCase )
lowercase__: Optional[int] = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
lowercase__: Any = state_dict.pop(__UpperCAmelCase )
lowercase__: List[Any] = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
lowercase__: str = state_dict.pop(__UpperCAmelCase )
lowercase__: Dict = val
# finally, create HuggingFace model and load state dict
lowercase__: List[str] = ConditionalDetrForSegmentation(__UpperCAmelCase ) if is_panoptic else ConditionalDetrForObjectDetection(__UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
model.eval()
model.push_to_hub(repo_id=__UpperCAmelCase , organization='''DepuMeng''' , commit_message='''Add model''' )
# verify our conversion
lowercase__: str = conditional_detr(__UpperCAmelCase )
lowercase__: Tuple = model(__UpperCAmelCase )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
model.save_pretrained(__UpperCAmelCase )
image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__A = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
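# Illustration (not part of the conversion script): read_in_q_k_v above relies on
# PyTorch's MultiheadAttention packing the query, key and value projections into one
# stacked in_proj matrix, so a (3*d, d) weight splits into three (d, d) blocks, which
# is exactly the slicing done with d = 256 above.
_toy_d = 4
_toy_in_proj = torch.randn(3 * _toy_d, _toy_d)
_toy_q = _toy_in_proj[:_toy_d, :]
_toy_k = _toy_in_proj[_toy_d : 2 * _toy_d, :]
_toy_v = _toy_in_proj[-_toy_d:, :]
assert _toy_q.shape == _toy_k.shape == _toy_v.shape == (_toy_d, _toy_d)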
| 361 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = "beit"
def __init__( self , _UpperCAmelCase=8192 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=[3, 5, 7, 11] , _UpperCAmelCase=[1, 2, 3, 6] , _UpperCAmelCase=True , _UpperCAmelCase=0.4 , _UpperCAmelCase=256 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=255 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Union[str, Any] = vocab_size
lowercase__: List[Any] = hidden_size
lowercase__: Optional[int] = num_hidden_layers
lowercase__: Optional[int] = num_attention_heads
lowercase__: int = intermediate_size
lowercase__: List[str] = hidden_act
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: List[str] = initializer_range
lowercase__: Optional[int] = layer_norm_eps
lowercase__: int = image_size
lowercase__: Tuple = patch_size
lowercase__: int = num_channels
lowercase__: Optional[Any] = use_mask_token
lowercase__: List[Any] = use_absolute_position_embeddings
lowercase__: Optional[int] = use_relative_position_bias
lowercase__: Optional[int] = use_shared_relative_position_bias
lowercase__: Optional[Any] = layer_scale_init_value
lowercase__: Union[str, Any] = drop_path_rate
lowercase__: Tuple = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__: Tuple = out_indices
lowercase__: Optional[int] = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__: List[str] = use_auxiliary_head
lowercase__: Optional[Any] = auxiliary_loss_weight
lowercase__: str = auxiliary_channels
lowercase__: List[str] = auxiliary_num_convs
lowercase__: Tuple = auxiliary_concat_input
lowercase__: Dict = semantic_loss_ignore_index
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = version.parse("1.11" )
@property
def _snake_case ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _snake_case ( self ):
return 1e-4
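# Usage sketch (illustrative; not part of the module above): an OnnxConfig like the one
# here is what the documented transformers.onnx exporter consumes, e.g. from the command
# line (model id shown for illustration):
#
#   python -m transformers.onnx --model=microsoft/beit-base-patch16-224 onnx/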
| 2 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Tuple ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = 1
__snake_case = 3
__snake_case = (32, 32)
__snake_case = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a__ )
return image
@property
def a (self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=a__ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def a (self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def a (self : int ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(a__ )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__snake_case = self.dummy_cond_unet_upscale
__snake_case = DDPMScheduler()
__snake_case = DDIMScheduler(prediction_type='''v_prediction''' )
__snake_case = self.dummy_vae
__snake_case = self.dummy_text_encoder
__snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__snake_case = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case = Image.fromarray(np.uinta(a__ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__snake_case = StableDiffusionUpscalePipeline(
unet=a__ , low_res_scheduler=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , max_noise_level=350 , )
__snake_case = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
__snake_case = '''A painting of a squirrel eating a burger'''
__snake_case = torch.Generator(device=a__ ).manual_seed(0 )
__snake_case = sd_pipe(
[prompt] , image=a__ , generator=a__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__snake_case = output.images
__snake_case = torch.Generator(device=a__ ).manual_seed(0 )
__snake_case = sd_pipe(
[prompt] , image=a__ , generator=a__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=a__ , )[0]
__snake_case = image[0, -3:, -3:, -1]
__snake_case = image_from_tuple[0, -3:, -3:, -1]
__snake_case = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__snake_case = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a (self : str ):
"""simple docstring"""
__snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__snake_case = self.dummy_cond_unet_upscale
__snake_case = DDPMScheduler()
__snake_case = DDIMScheduler(prediction_type='''v_prediction''' )
__snake_case = self.dummy_vae
__snake_case = self.dummy_text_encoder
__snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__snake_case = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case = Image.fromarray(np.uinta(a__ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__snake_case = StableDiffusionUpscalePipeline(
unet=a__ , low_res_scheduler=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , max_noise_level=350 , )
__snake_case = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
__snake_case = '''A painting of a squirrel eating a burger'''
__snake_case = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__snake_case = output.images
assert image.shape[0] == 2
__snake_case = torch.Generator(device=a__ ).manual_seed(0 )
__snake_case = sd_pipe(
[prompt] , image=a__ , generator=a__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__snake_case = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.dummy_cond_unet_upscale
__snake_case = DDPMScheduler()
__snake_case = DDIMScheduler(prediction_type='''v_prediction''' )
__snake_case = self.dummy_vae
__snake_case = self.dummy_text_encoder
__snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__snake_case = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case = Image.fromarray(np.uinta(a__ ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
__snake_case = unet.half()
__snake_case = text_encoder.half()
# make sure here that pndm scheduler skips prk
__snake_case = StableDiffusionUpscalePipeline(
unet=a__ , low_res_scheduler=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , max_noise_level=350 , )
__snake_case = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
__snake_case = '''A painting of a squirrel eating a burger'''
__snake_case = torch.manual_seed(0 )
__snake_case = sd_pipe(
[prompt] , image=a__ , generator=a__ , num_inference_steps=2 , output_type='''np''' , ).images
__snake_case = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
__snake_case = '''stabilityai/stable-diffusion-x4-upscaler'''
__snake_case = StableDiffusionUpscalePipeline.from_pretrained(a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
__snake_case = '''a cat sitting on a park bench'''
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a__ , image=a__ , generator=a__ , output_type='''np''' , )
__snake_case = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def a (self : int ):
"""simple docstring"""
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
__snake_case = '''stabilityai/stable-diffusion-x4-upscaler'''
__snake_case = StableDiffusionUpscalePipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
__snake_case = '''a cat sitting on a park bench'''
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a__ , image=a__ , generator=a__ , output_type='''np''' , )
__snake_case = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def a (self : str ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__snake_case = '''stabilityai/stable-diffusion-x4-upscaler'''
__snake_case = StableDiffusionUpscalePipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__snake_case = '''a cat sitting on a park bench'''
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a__ , image=a__ , generator=a__ , num_inference_steps=5 , output_type='''np''' , )
__snake_case = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 24 |
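# The tests above exercise diffusers' StableDiffusionUpscalePipeline. A minimal
# inference sketch of the same pipeline follows; the checkpoint id is the one
# used in the slow tests above, while file names and sizes are illustrative
# assumptions.
import torch
from PIL import Image
from diffusers import StableDiffusionUpscalePipeline

pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
).to("cuda")

low_res = Image.open("low_res_cat.png").convert("RGB").resize((128, 128))
# The pipeline upscales by a fixed factor of 4: a 128x128 input yields 512x512.
upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]
upscaled.save("upscaled_cat.png")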
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """
    >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    [10, 22, 33, 41, 60, 80]
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 1 |
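# For contrast with the exponential recursive approach above: a standard
# O(n log n) patience-sorting computation of the *length* of the longest
# non-decreasing subsequence. This is an editorial sketch, not part of the
# sample above.
from bisect import bisect_right


def longest_subsequence_length(array: list[int]) -> int:
    tails: list[int] = []  # tails[k] = smallest possible tail of a subsequence of length k + 1
    for value in array:
        pos = bisect_right(tails, value)  # bisect_right admits equal elements (non-decreasing)
        if pos == len(tails):
            tails.append(value)
        else:
            tails[pos] = value
    return len(tails)


assert longest_subsequence_length([10, 22, 9, 33, 21, 50, 41, 60, 80]) == 6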
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 263 |
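# Quick sketch of the derived attributes that SwinConfig.__init__ above computes
# (standard transformers API; the numbers follow the defaults shown).
from transformers import SwinConfig

config = SwinConfig()            # embed_dim=96, depths=[2, 2, 6, 2]
print(config.hidden_size)        # int(96 * 2 ** (4 - 1)) == 768
print(config.num_layers)         # len(depths) == 4
print(config.stage_names)        # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']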
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 263 | 1 |
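# Sketch of the attribute_map defined above: standard config names resolve to
# DistilBERT's own parameter names (standard transformers behavior).
from transformers import DistilBertConfig

config = DistilBertConfig(dim=768, n_heads=12, n_layers=6)
assert config.hidden_size == config.dim == 768
assert config.num_attention_heads == config.n_heads == 12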
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence most TensorFlow logging before it is imported below
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 23 |
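# The guarded-import pattern used throughout the script above generalizes to any
# optional dependency; a small editorial helper, not part of the original script:
def report_version(module_name: str) -> None:
    try:
        module = __import__(module_name)
        print(f"{module_name} version:", getattr(module, "__version__", "unknown"))
    except ImportError:
        print(f"{module_name} version:", None)


for name in ("torch", "deepspeed", "tensorflow"):
    report_version(name)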
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 138 | 0 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
__a = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = 1
__a = FrozenDict(_a )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
__a = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = True
__a = FrozenDict(_a )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__a = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ):
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
__a = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
__a = self.segmentation_model(**_a )
__a = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__a = self.numpy_to_pil(_a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__a = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
| 11 |
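# Hedged usage sketch for the pipeline defined above: CLIPSeg turns `text` into a
# mask, then standard inpainting repaints the masked region from `prompt`. The
# checkpoint ids and the community-pipeline name are assumptions, not taken from
# the code above.
import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
from diffusers import DiffusionPipeline

seg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",  # assumed name of the community pipeline
    segmentation_model=seg_model,
    segmentation_processor=seg_processor,
    torch_dtype=torch.float16,
).to("cuda")

image = Image.open("room.png").convert("RGB").resize((512, 512))
result = pipe(text="a glass table", prompt="a wooden bench", image=image).images[0]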
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''width_multiplier''' ) )
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=64 , _a=2 , _a=3 , _a="swish" , _a=3 , _a=32 , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=10 , _a=None , _a=0.25 , _a=0.0 , _a=0.0 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def __UpperCAmelCase ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCAmelCase ( self ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = MobileViTVaModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForImageClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForSemanticSegmentation(_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase : Union[str, Any] = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : List[str] = False
def __UpperCAmelCase ( self ):
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __UpperCAmelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
def check_hidden_states_output(_a , _a , _a ):
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.hidden_states
__a = 5
self.assertEqual(len(_a ) , _a )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__a = 2
for i in range(len(_a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(_a , _a , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileViTVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowercase ( ) -> str:
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_a )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
# verify the logits
__a = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _a )
__a = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _a )
__a = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(50, 60)] )
__a = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _a )
__a = image_processor.post_process_semantic_segmentation(outputs=_a )
__a = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _a )
| 11 | 1 |
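# Minimal inference sketch mirroring the integration tests above; the checkpoint
# id is the one the tests use, the input image is an assumption.
import torch
from PIL import Image
from transformers import MobileViTImageProcessor, MobileViTV2ForImageClassification

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")

image = Image.open("cat.png").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])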
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class __lowerCamelCase ( metaclass=snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["sentencepiece"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
| 39 |
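# The classes above are import-time placeholders: with `sentencepiece` missing,
# instantiation raises a helpful ImportError instead of a NameError. A simplified,
# self-contained sketch of the mechanism (not the actual transformers code):
class DummyObject(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the following backends: {', '.join(cls._backends)}")


class SomeSentencePieceTokenizer(metaclass=DummyObject):  # hypothetical name
    _backends = ["sentencepiece"]


try:
    SomeSentencePieceTokenizer()
except ImportError as err:
    print(err)  # SomeSentencePieceTokenizer requires the following backends: sentencepiece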
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
a_ : Any = logging.get_logger(__name__)
a_ : Optional[Any] = {"""vocab_file""": """spiece.model"""}
a_ : Tuple = {
"""vocab_file""": {
"""TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
}
}
class snake_case ( lowercase ):
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase="<s>" , UpperCamelCase="</s>" , UpperCamelCase="<unk>" , UpperCamelCase="<sep>" , UpperCamelCase="<pad>" , UpperCamelCase="<cls>" , UpperCamelCase="<mask>" , UpperCamelCase=["<eop>", "<eod>"] , UpperCamelCase = None , **UpperCamelCase , ):
"""simple docstring"""
lowerCamelCase_ = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token
lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase , remove_space=UpperCamelCase , keep_accents=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase , )
lowerCamelCase_ = 3
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = remove_space
lowerCamelCase_ = keep_accents
lowerCamelCase_ = vocab_file
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
lowerCamelCase_ = jieba
lowerCamelCase_ = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def snake_case ( self ):
"""simple docstring"""
return len(self.sp_model )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
return state
def __setstate__( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ = {}
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if self.remove_space:
lowerCamelCase_ = " ".join(inputs.strip().split() )
else:
lowerCamelCase_ = inputs
lowerCamelCase_ = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCamelCase_ = unicodedata.normalize("NFKD" , UpperCamelCase )
lowerCamelCase_ = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase )] )
if self.do_lower_case:
lowerCamelCase_ = outputs.lower()
return outputs
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.preprocess_text(UpperCamelCase )
lowerCamelCase_ = self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase )
lowerCamelCase_ = []
for piece in pieces:
if len(UpperCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCamelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase_ = cur_pieces[1:]
else:
lowerCamelCase_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase )
else:
new_pieces.append(UpperCamelCase )
return new_pieces
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
return self.sp_model.PieceToId(UpperCamelCase )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCamelCase )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = "".join(UpperCamelCase ).replace(UpperCamelCase , " " ).strip()
return out_string
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case ( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1, 1]
return ([0] * len(UpperCamelCase )) + [1, 1]
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
if not os.path.isdir(UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ = os.path.join(
UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase , "wb" ) as fi:
lowerCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase )
return (out_vocab_file,)
def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = super()._decode(*UpperCamelCase , **UpperCamelCase )
lowerCamelCase_ = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
| 55 | 0 |
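# Hedged sketch, assuming the sample above is transformers' CPM tokenizer; the
# checkpoint id comes from the vocab map in the sample, and `jieba` must be
# installed for the pre-tokenization step.
from transformers import CpmTokenizer

tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
ids = tokenizer.encode("今天天气真好")  # jieba segments the text before SentencePiece
print(tokenizer.decode(ids))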
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
_A : Any = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False , ) -> List[str]:
"""simple docstring"""
output_path.parent.mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
_lowerCAmelCase , _lowerCAmelCase , f=output_path.as_posix() , input_names=_lowerCAmelCase , output_names=_lowerCAmelCase , dynamic_axes=_lowerCAmelCase , do_constant_folding=_lowerCAmelCase , use_external_data_format=_lowerCAmelCase , enable_onnx_checker=_lowerCAmelCase , opset_version=_lowerCAmelCase , )
else:
export(
_lowerCAmelCase , _lowerCAmelCase , f=output_path.as_posix() , input_names=_lowerCAmelCase , output_names=_lowerCAmelCase , dynamic_axes=_lowerCAmelCase , do_constant_folding=_lowerCAmelCase , opset_version=_lowerCAmelCase , )
@torch.no_grad()
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False ) -> int:
"""simple docstring"""
lowerCamelCase__ : List[Any] = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowerCamelCase__ : Union[str, Any] = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
else:
lowerCamelCase__ : str = """cpu"""
lowerCamelCase__ : str = StableDiffusionPipeline.from_pretrained(_lowerCAmelCase , torch_dtype=_lowerCAmelCase ).to(_lowerCAmelCase )
lowerCamelCase__ : Union[str, Any] = Path(_lowerCAmelCase )
# TEXT ENCODER
lowerCamelCase__ : Dict = pipeline.text_encoder.config.max_position_embeddings
lowerCamelCase__ : Tuple = pipeline.text_encoder.config.hidden_size
lowerCamelCase__ : Union[str, Any] = pipeline.tokenizer(
'''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors='''pt''' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=_lowerCAmelCase , dtype=torch.intaa )) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''sequence'''},
} , opset=_lowerCAmelCase , )
del pipeline.text_encoder
# UNET
lowerCamelCase__ : str = pipeline.unet.config.in_channels
lowerCamelCase__ : Optional[int] = pipeline.unet.config.sample_size
lowerCamelCase__ : Tuple = output_path / """unet""" / """model.onnx"""
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase ),
torch.randn(2 ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase ),
torch.randn(2 , _lowerCAmelCase , _lowerCAmelCase ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase ),
False,
) , output_path=_lowerCAmelCase , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''timestep''': {0: '''batch'''},
'''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
} , opset=_lowerCAmelCase , use_external_data_format=_lowerCAmelCase , )
lowerCamelCase__ : str = str(unet_path.absolute().as_posix() )
lowerCamelCase__ : Any = os.path.dirname(_lowerCAmelCase )
lowerCamelCase__ : List[Any] = onnx.load(_lowerCAmelCase )
# clean up existing tensor files
shutil.rmtree(_lowerCAmelCase )
os.mkdir(_lowerCAmelCase )
# collate external tensor files into one
onnx.save_model(
_lowerCAmelCase , _lowerCAmelCase , save_as_external_data=_lowerCAmelCase , all_tensors_to_one_file=_lowerCAmelCase , location='''weights.pb''' , convert_attribute=_lowerCAmelCase , )
del pipeline.unet
# VAE ENCODER
lowerCamelCase__ : Any = pipeline.vae
lowerCamelCase__ : Union[str, Any] = vae_encoder.config.in_channels
lowerCamelCase__ : Tuple = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
lowerCamelCase__ : Optional[int] = lambda UpperCAmelCase , UpperCAmelCase : vae_encoder.encode(_lowerCAmelCase , _lowerCAmelCase )[0].sample()
onnx_export(
_lowerCAmelCase , model_args=(
torch.randn(1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase ),
False,
) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=_lowerCAmelCase , )
# VAE DECODER
lowerCamelCase__ : Optional[Any] = pipeline.vae
lowerCamelCase__ : Dict = vae_decoder.config.latent_channels
lowerCamelCase__ : str = vae_decoder.config.out_channels
# forward only through the decoder part
lowerCamelCase__ : Union[str, Any] = vae_encoder.decode
onnx_export(
_lowerCAmelCase , model_args=(
torch.randn(1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase ),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=_lowerCAmelCase , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
lowerCamelCase__ : Optional[Any] = pipeline.safety_checker
lowerCamelCase__ : List[Any] = safety_checker.config.vision_config.num_channels
lowerCamelCase__ : Tuple = safety_checker.config.vision_config.image_size
lowerCamelCase__ : Dict = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase ),
torch.randn(1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase ),
) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
'''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
} , opset=_lowerCAmelCase , )
del pipeline.safety_checker
lowerCamelCase__ : Dict = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''' )
lowerCamelCase__ : Any = pipeline.feature_extractor
else:
lowerCamelCase__ : Any = None
lowerCamelCase__ : Dict = None
lowerCamelCase__ : Tuple = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''' ) , scheduler=pipeline.scheduler , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(_lowerCAmelCase )
print('''ONNX pipeline saved to''' , _lowerCAmelCase )
del pipeline
del onnx_pipeline
lowerCamelCase__ : List[str] = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase , provider='''CPUExecutionProvider''' )
print('''ONNX pipeline is loadable''' )
if __name__ == "__main__":
_A : Dict = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
_A : Any = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
| 371 |
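# After running the conversion script above, e.g. (illustrative paths):
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14
# the exported pipeline loads back through onnxruntime, as the script's own
# sanity check does:
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained("./sd_onnx", provider="CPUExecutionProvider")
image = pipe("a photo of an astronaut riding a horse").images[0]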
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create a universe of discourse using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 265 | 0 |
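# Reading a single membership degree out of the fuzzy sets built above
# (editorial sketch; skfuzzy's interp_membership interpolates on the universe):
import numpy as np
import skfuzzy as fuzz

x = np.linspace(0, 75, 75)
young = fuzz.membership.trimf(x, [0, 25, 50])
print(fuzz.interp_membership(x, young, 30))  # degree to which age 30 is "young"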
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download an artifact: the API URL redirects, so fetch the redirect target first."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error and collect the tests that failed with it."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path like `tests/models/albert/...`."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)
    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)
    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
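    # Example invocation (the run id, output dir, and token are placeholders):
    #   python <this_script>.py --workflow_run_id 12345 --output_dir ci_errors --token "$GITHUB_TOKEN"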
| 114 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
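# A minimal export sketch using the (legacy) transformers.onnx helpers; the
# checkpoint name and output path below are illustrative, not prescribed by
# this module.
if __name__ == "__main__":
    from pathlib import Path

    from transformers import SegformerImageProcessor, SegformerModel
    from transformers.onnx import export

    ckpt = "nvidia/segformer-b0-finetuned-ade-512-512"
    processor = SegformerImageProcessor.from_pretrained(ckpt)
    model = SegformerModel.from_pretrained(ckpt)
    onnx_config = SegformerOnnxConfig(model.config)
    export(processor, model, onnx_config, onnx_config.default_onnx_opset, Path("segformer.onnx"))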
| 114 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
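    # With the lazy module installed in sys.modules, importing e.g. Swinv2Model
    # from this package only pulls in the heavy torch-backed module on first
    # attribute access.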
| 337 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'Salesforce/codegen-350M-mono': 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """Fast CodeGen tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def decode(self, token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, truncate_before_pattern: Optional[List[str]] = None, **kwargs) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
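# A small usage sketch; the checkpoint name is the public Salesforce release
# and the truncation patterns mirror the ones suggested in the CodeGen docs.
if __name__ == "__main__":
    tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
    ids = tokenizer("def hello_world():")["input_ids"]
    print(tokenizer.decode(ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"]))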
| 337 | 1 |
"""simple docstring"""
def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """Count inversions by divide and conquer in O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    """Merge the sorted halves P and Q, counting the inversions that cross them."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
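# Complexity note: the brute-force counter is O(n^2), while the merge-sort
# based counter runs in O(n log n) because each merge step counts all cross
# inversions in linear time.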
| 165 |
"""simple docstring"""
def solution(length=50):
    """Count the ways a row of the given length can be filled with red blocks
    of minimum length three and single grey squares (Project Euler 114)."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(F'{solution() = }')
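    # Known value from the problem statement: a row of length seven can be
    # filled in exactly seventeen ways.
    assert solution(7) == 17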
| 165 | 1 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name
    if "patch_embed" in old_name:
        _, layer, _ = old_name.split(".")
        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")
if "network" in old_name and re.search(R'''\d\.\d''' , lowercase_ ):
A__ = R'''\b\d{2}\b'''
if bool(re.search(lowercase_ , lowercase_ ) ):
A__ = re.search(R'''\d\.\d\d.''' , lowercase_ ).group()
else:
A__ = re.search(R'''\d\.\d.''' , lowercase_ ).group()
if int(match[0] ) < 6:
A__ = old_name.replace(lowercase_ , '''''' )
A__ = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
A__ = '''intermediate_stages.''' + trimmed_name
else:
A__ = old_name.replace(lowercase_ , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
A__ = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
A__ = str(int(match[2] ) - num_meta4D_last_stage )
A__ = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
A__ = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
A__ = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
A__ = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
A__ = trimmed_name.replace('''fc2''' , '''linear_out''' )
A__ = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , lowercase_ ):
A__ = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
A__ = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
A__ = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
A__ = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
A__ = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
A__ = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
A__ = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
A__ = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
A__ = new_name.replace('''norm''' , '''layernorm''' )
A__ = '''efficientformer.''' + new_name
else:
A__ = '''efficientformer.encoder.''' + new_name
return new_name
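# Illustrative mapping, traced by hand through the branches above (worth
# re-checking against a real checkpoint):
#   "patch_embed.0.weight" -> "efficientformer.patch_embed.convolution1.weight"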
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)
    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )
    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")
    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 231 |
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a Python list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
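# A tiny usage sketch of the queue above:
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue("a").enqueue("b")
    assert len(queue) == 2 and queue.first() == "a"
    assert queue.dequeue() == "a" and queue.dequeue() == "b"
    assert queue.is_empty()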
| 231 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, size=None, do_normalize=True, do_convert_rgb=True, patch_size=None):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
UpperCamelCase__ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
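        # the "+ 2" accounts for the row and column indices that Pix2Struct
        # prepends to every flattened patch before padding to max_patches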
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
a , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_vqa(self):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
UpperCamelCase__ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCamelCase__ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(a ):
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a ).flattened_patches
UpperCamelCase__ = "Hello"
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a , header_text=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
a , return_tensors="pt" , max_patches=a , header_text=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_numpy(self):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
UpperCamelCase__ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
a , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_pytorch(self):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
UpperCamelCase__ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
a , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_call_pil_four_channels(self):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
UpperCamelCase__ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
a , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 80 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 100 | 0 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    """Build a (k_size x k_size) Gaussian kernel centred on the window."""
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    """Apply a Gaussian filter to a grayscale image using an im2col layout."""
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape (k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
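    # Note: gen_gaussian_kernel() is not normalized to sum to 1, so the output
    # brightness depends on sigma. A normalized variant (an assumption about
    # the intended behaviour, not part of the original script) would divide
    # the kernel by its sum:
    #     kernel = gen_gaussian_kernel(3, 1)
    #     kernel = kernel / kernel.sum()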
| 125 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def str2bool(v):
    """Parse a boolean command-line argument."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
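# Typical wiring with argparse (illustrative; the flag name is an example only):
#   parser.add_argument("--class_cond", type=str2bool, default=True)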
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    """Split the fused qkv projection and copy attention weights into diffusers naming."""
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
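# Shape note for the attention conversion above: the source checkpoint stores the
# fused qkv projection as a 1x1 convolution of shape (3 * channels, channels, 1, 1).
# `chunk(3, dim=0)` splits it into q, k and v, and the two `squeeze(-1)` calls drop
# the trailing 1x1 spatial dims so the tensors fit diffusers' linear projections.
# A minimal standalone illustration (the channel count is arbitrary):
#
#     qkv = torch.randn(3 * 64, 64, 1, 1)   # fused conv weight
#     w_q, w_k, w_v = qkv.chunk(3, dim=0)   # each (64, 64, 1, 1)
#     w_q = w_q.squeeze(-1).squeeze(-1)     # (64, 64), usable as an nn.Linear weight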
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    """Map an OpenAI consistency-model state dict onto diffusers UNet2DModel keys."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = j == 0 and downsample_block_has_skip
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = j == 0 and downsample_block_has_skip
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 125 | 1 |
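End to end, the script above maps an OpenAI consistency-models checkpoint onto a diffusers ConsistencyModelPipeline. A hedged sketch of invoking it and sampling from the result (file and directory names are illustrative):

# Shell: python convert_consistency_to_diffusers.py \
#            --unet_path cd_imagenet64_l2.pt --dump_path ./cm-imagenet64
import torch
from diffusers import ConsistencyModelPipeline

pipe = ConsistencyModelPipeline.from_pretrained("./cm-imagenet64")
image = pipe(num_inference_steps=1).images[0]  # single-step consistency sampling
image.save("sample.png")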
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 97 |
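The `_LazyModule` registration above defers every import until the corresponding attribute is first touched, which keeps `import transformers` cheap. A simplified sketch of the mechanism (not the actual `_LazyModule` implementation, which also handles submodule access and pickling):

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve exported names on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name back to the submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._name_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f".{module_name}", self.__name__), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value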
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 21 | 0 |
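Both `__init__` modules above gate their exports behind backend availability checks. A stripped-down version of that idea (the real helpers in `transformers.utils` additionally handle version pins and a backend registry):

import importlib.util

def is_package_available(package_name: str) -> bool:
    # True when the package can be imported in the current environment.
    return importlib.util.find_spec(package_name) is not None

if is_package_available("sentencepiece"):
    pass  # safe to expose the slow, sentencepiece-backed tokenizer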
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 351 |
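Outside the test suite, the pipeline exercised above runs like any other text-to-image pipeline, with `sag_scale` controlling the strength of self-attention guidance; a sketch mirroring the slow tests (a CUDA GPU and the prompt are assumptions):

import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
pipe = pipe.to("cuda")
generator = torch.manual_seed(0)
image = pipe(
    "a photo of an astronaut riding a horse",
    generator=generator,
    guidance_scale=7.5,
    sag_scale=1.0,  # self-attention guidance strength
    num_inference_steps=20,
).images[0]
image.save("sag_sample.png")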
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = (10**k - 1) // 9 is divisible
    by ``divisor`` (0 if impossible, i.e. when ``divisor`` shares a factor with 10)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least odd divisor for which least_divisible_repunit first exceeds
    ``limit`` (Project Euler problem 129)."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
| 19 | 0 |
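In the snippet above, repunits are never materialized: the recurrence R(n+1) = (10·R(n) + 1) mod k walks the repunit residues until one hits zero, and `solution` then scans odd candidates until that index first exceeds the limit. A quick sanity check of the recurrence:

# R(6) = 111111 = 7 * 15873, and no shorter repunit is divisible by 7.
assert least_divisible_repunit(7) == 6
# Divisors sharing a factor with 10 never divide a repunit (repunits end in 1),
# hence the early return of 0 for even divisors and multiples of 5.
assert least_divisible_repunit(10) == 0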
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 330 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    """Generate a random hand, an opposing hand, and the expected comparison result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Generate a stream of random hand pairs with their expected outcomes."""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem 54 from Project Euler: count player-one wins in poker_hands.txt.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 2 | 0 |
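As a quick illustration of the `compare_with` API these tests exercise (hands taken from the fixtures above; assumes PokerHand is importable as in the test module):

flush = PokerHand("8C 9C 5C 3C TC")
straight = PokerHand("JS QS 9H TS KH")
assert flush.compare_with(straight) == "Win"   # a flush outranks a straight
assert straight.compare_with(flush) == "Loss"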
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 104 |
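Outside the test harness, the integration check above corresponds to a plain feature-extraction forward pass; a hedged sketch (the tokenizer class is assumed from the usual DeBERTa pairing):

import torch
from transformers import DebertaModel, DebertaTokenizer

tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
model = DebertaModel.from_pretrained("microsoft/deberta-base")
inputs = tokenizer("DeBERTa uses disentangled attention.", return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs)[0]  # (batch, seq_len, hidden_size)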
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ) -> None:
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
| 104 | 1 |
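The caching logic above hinges on `FileLock`: in distributed training only the first process builds the feature cache while the rest block on the lock and then read the finished file. The same pattern in isolation (the cache path and builder are illustrative):

import os
import torch
from filelock import FileLock

def load_or_build(cache_file, build_fn):
    # Only one process may build the cache; the others block, then read it.
    with FileLock(cache_file + ".lock"):
        if os.path.exists(cache_file):
            return torch.load(cache_file)
        data = build_fn()
        torch.save(data, cache_file)
        return data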
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 116 |
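The special-token layout implemented above is the CamemBERT/RoBERTa one: `<s> A </s>` for a single sequence and `<s> A </s></s> B </s>` for a pair, with all-zero token type ids. A quick check against a published checkpoint (network access and the dummy ids are assumptions):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("moussaKam/barthez")
ids = tok.build_inputs_with_special_tokens([10, 11], [20, 21])
# -> [cls, 10, 11, sep, sep, 20, 21, sep]
assert ids[0] == tok.cls_token_id and ids.count(tok.sep_token_id) == 3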
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear-polynomial warmup wrapped around another learning-rate schedule."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Create an optimizer with a warmup-then-polynomial-decay learning-rate schedule."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied before each gradient update."""

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to apply weight decay to `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator(object):
    """Accumulate gradients over several steps on each replica, then apply and reset."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates ``gradients`` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
| 116 | 1 |
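A hedged usage sketch of the helpers above via the public `transformers` entry point (the Keras model below is a stand-in):

import tensorflow as tf
from transformers import create_optimizer  # public wrapper for the code above

optimizer, lr_schedule = create_optimizer(
    init_lr=5e-5,
    num_train_steps=10_000,
    num_warmup_steps=500,
    weight_decay_rate=0.01,  # > 0 selects AdamWeightDecay over plain Adam
)
model = tf.keras.Sequential([tf.keras.layers.Dense(2)])  # illustrative model
model.compile(optimizer=optimizer, loss="mse")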
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class UpperCamelCase_ (unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = TFAutoModel.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = AutoModel.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = AutoModelForPreTraining.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
UpperCAmelCase_ : str = TFAutoModelForCausalLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = AutoModelForCausalLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = AutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = AutoModelForMaskedLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : int = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : Tuple = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : int = AutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
UpperCAmelCase_ : str = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 14_410 )
UpperCAmelCase_ : Optional[Any] = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 14_410 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 14_410 )
UpperCAmelCase_ : str = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 14_410 )
| 371 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class UpperCamelCase_ (unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = TFAutoModel.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = AutoModel.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = AutoModelForPreTraining.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
            UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = AutoModelForCausalLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
            UpperCAmelCase_ , UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
            UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForMaskedLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = AutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
            UpperCAmelCase_ , UpperCAmelCase_ = AutoModelForMaskedLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : int = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
            UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
            UpperCAmelCase_ , UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : Tuple = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : int = AutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
UpperCAmelCase_ : str = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 14_410 )
UpperCAmelCase_ : Optional[Any] = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 14_410 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 14_410 )
UpperCAmelCase_ : str = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 14_410 )
| 253 | 0 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__ ( DiffusionPipeline):
'''simple docstring'''
    def __init__( self , segmentation_model , segmentation_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ) -> List[Any]:
super().__init__()
if hasattr(scheduler.config , "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
F"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
F" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , __lowerCamelCase , standard_warn=__lowerCamelCase)
_A : List[str] = dict(scheduler.config)
_A : Union[str, Any] = 1
_A : List[Any] = FrozenDict(__lowerCamelCase)
if hasattr(scheduler.config , "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
F"The configuration file of this scheduler: {scheduler} has not set the configuration"
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , __lowerCamelCase , standard_warn=__lowerCamelCase)
_A : Optional[Any] = dict(scheduler.config)
_A : List[str] = True
_A : Tuple = FrozenDict(__lowerCamelCase)
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .")
self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def _lowerCamelCase ( self , __lowerCamelCase = "auto") -> Optional[int]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_A : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__lowerCamelCase)
    def disable_attention_slicing( self) -> List[str]:
        self.enable_attention_slicing(None)
def _lowerCamelCase ( self) -> List[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`")
_A : str = torch.device("cuda")
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device)
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowerCamelCase ( self) -> List[Any]:
if self.device != torch.device("meta") or not hasattr(self.unet , "_hf_hook"):
return self.device
for module in self.unet.modules():
if (
            hasattr(module , "_hf_hook")
and hasattr(module._hf_hook , "execution_device")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
    def __call__( self , prompt , image , text , height = 5_1_2 , width = 5_1_2 , num_inference_steps = 5_0 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> Dict:
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding="max_length" , return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
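# Hedged usage sketch (added; not part of the original pipeline file). The CLIPSeg
# checkpoint id is an assumption, and `init_image` plus the remaining components are
# placeholders, e.g. taken from a loaded Stable Diffusion inpainting pipeline.
#
# seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
# seg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# pipe = lowerCAmelCase__(
#     segmentation_model=seg_model, segmentation_processor=seg_processor,
#     vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
#     scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor,
# )
# result = pipe(prompt="a red couch", image=init_image, text="the couch")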
| 11 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
])
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
    def setUp( self) -> str:
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , )
assert hasattr(self , "env")
    def create_estimator( self , instance_count) -> Tuple:
        job_name = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version="py36" , )
    def _lowerCamelCase ( self , job_name) -> Optional[Any]:
        TrainingJobAnalytics(job_name).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(2,)])
    def test_script( self , instance_count) -> Any:
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 9_9_9_9_9_9)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase)
| 11 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key( state_dict : Union[str, Any], old : Any, new : Any ) -> List[Any]:
    """simple docstring"""
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict : List[str] ) -> Union[str, Any]:
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''', '''backbone.conv_encoder.model''' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict : List[str], is_panoptic : Dict=False ) -> Dict:
    """simple docstring"""
    prefix = ''''''
    if is_panoptic:
        prefix = '''conditional_detr.'''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[F"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[F"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[F"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[F"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[F"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
def prepare_img( ) -> Optional[Any]:
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name : Optional[Any], pytorch_dump_folder_path : Dict ) -> Optional[int]:
    """simple docstring"""
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = '''resnet101'''
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = '''panoptic''' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = '''huggingface/label-files'''
        filename = '''coco-detection-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
    image_processor = ConditionalDetrImageProcessor(format=format )
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    logger.info(F"""Converting model {model_name}...""" )
    # load original model from torch hub
    conditional_detr = torch.hub.load('''DeppMeng/ConditionalDETR''', model_name, pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = '''conditional_detr.''' + src
        rename_key(state_dict, src, dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''conditional_detr.model.''' if is_panoptic else '''model.'''
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('''conditional_detr''' )
                and not key.startswith('''class_labels_classifier''' )
                and not key.startswith('''bbox_predictor''' )
            ):
                val = state_dict.pop(key )
                state_dict['''conditional_detr.model''' + key[len('''conditional_detr''' ):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict['''conditional_detr.''' + key] = val
            elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    model.push_to_hub(repo_id=model_name, organization='''DepuMeng''', commit_message='''Add model''' )
    # verify our conversion
    original_outputs = conditional_detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits, original_outputs['''pred_logits'''], atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes, original_outputs['''pred_boxes'''], atol=1e-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['''pred_masks'''], atol=1e-4 )
    # Save model and image processor
    logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
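# Hedged usage note (added): run as a script, e.g.
#   python convert_conditional_detr_checkpoint.py --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50
# The script file name is an assumption; use whatever this module is saved as.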
| 367 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time( lowercase : Tuple ) -> Dict:
"""simple docstring"""
    t = int(lowercase )
    h , m , s = t // 3600, (t // 60) % 60, t % 60
return F"""{h}:{m:02d}:{s:02d}""" if h != 0 else F"""{m:02d}:{s:02d}"""
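# Worked examples (added for illustration): format_time(3661) -> "1:01:01" and
# format_time(75) -> "01:15".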
def html_progress_bar( value : List[Any], total : Dict, prefix : Optional[int], label : Union[str, Any], width : Any=300 ) -> Any:
"""simple docstring"""
return F"""
<div>
{prefix}
<progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
{label}
</div>
"""
def text_to_html_table( items : Optional[Any] ) -> Any:
"""simple docstring"""
    html_code = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F""" <th>{i}</th>\n"""
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
_UpperCamelCase = F"""{elt:.6f}""" if isinstance(lowercase, lowercase ) else str(lowercase )
html_code += F""" <td>{elt}</td>\n"""
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class NotebookProgressBar :
"""simple docstring"""
    warmup : str = 5
    update_every : Optional[int] = 0.2
    def __init__( self : Dict , total : int , prefix : Optional[str] = None , leave : bool = True , parent : Optional["NotebookTrainingTracker"] = None , width : int = 300 , ) -> int:
        '''simple docstring'''
        self.total = total
        self.prefix = '''''' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
def snake_case__ ( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : str = None ) -> Dict:
'''simple docstring'''
_UpperCamelCase = value
if comment is not None:
_UpperCamelCase = comment
if self.last_value is None:
_UpperCamelCase = _UpperCamelCase = time.time()
_UpperCamelCase = _UpperCamelCase = value
_UpperCamelCase = _UpperCamelCase = None
_UpperCamelCase = self.warmup
_UpperCamelCase = 1
self.update_bar(lowerCAmelCase__ )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
_UpperCamelCase = time.time()
_UpperCamelCase = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
_UpperCamelCase = self.elapsed_time / (value - self.start_value)
else:
_UpperCamelCase = None
if value >= self.total:
_UpperCamelCase = self.total
_UpperCamelCase = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_UpperCamelCase = self.average_time_per_item * (self.total - value)
self.update_bar(lowerCAmelCase__ )
_UpperCamelCase = value
_UpperCamelCase = current_time
if self.average_time_per_item is None:
_UpperCamelCase = 1
else:
_UpperCamelCase = max(int(self.update_every / self.average_time_per_item ) , 1 )
    def update_bar( self : str , value : int , comment : Tuple=None ) -> Union[str, Any]:
        '''simple docstring'''
        spaced_value = ''' ''' * (len(str(self.total ) ) - len(str(value ) )) + str(value )
        if self.elapsed_time is None:
            self.label = f"""[{spaced_value}/{self.total} : < :"""
        elif self.predicted_remaining is None:
            self.label = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
        else:
            self.label = (
                f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
                f""" {format_time(self.predicted_remaining )}"""
            )
            self.label += f""", {1/self.average_time_per_item:.2f} it/s"""
        self.label += "]" if self.comment is None or len(self.comment ) == 0 else f""", {self.comment}]"""
        self.display()
    def display( self : List[Any] ) -> Tuple:
        '''simple docstring'''
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def close( self : Tuple ) -> Any:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class NotebookTrainingTracker ( NotebookProgressBar ):
"""simple docstring"""
    def __init__( self : Any , num_steps : int , column_names : List[Any]=None ) -> Dict:
        '''simple docstring'''
        super().__init__(num_steps )
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
    def display( self : List[Any] ) -> Any:
        '''simple docstring'''
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table )
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def write_line( self : Optional[int] , values : int ) -> Union[str, Any]:
        '''simple docstring'''
        if self.inner_table is None:
            self.inner_table = [list(values.keys() ), list(values.values() )]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table ) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key )
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns] )
    def add_child( self : Dict , total : Any , prefix : Any=None , width : List[str]=300 ) -> int:
        '''simple docstring'''
        self.child_bar = NotebookProgressBar(total , prefix=prefix , parent=self , width=width )
        return self.child_bar
    def remove_child( self : Any ) -> str:
        '''simple docstring'''
        self.child_bar = None
        self.display()
class __lowerCAmelCase ( TrainerCallback ):
"""simple docstring"""
def __init__( self : str ) -> Optional[int]:
'''simple docstring'''
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin( self : List[str] , args : Optional[int] , state : int , control : str , **kwargs : Any ) -> Dict:
        '''simple docstring'''
        self.first_column = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['''Training Loss''']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('''Validation Loss''' )
        self.training_tracker = NotebookTrainingTracker(state.max_steps , column_names )
    def on_step_end( self : Optional[int] , args : Optional[int] , state : List[str] , control : Dict , **kwargs : Dict ) -> Dict:
        '''simple docstring'''
        epoch = int(state.epoch ) if int(state.epoch ) == state.epoch else f"""{state.epoch:.2f}"""
        self.training_tracker.update(
            state.global_step + 1 , comment=f"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
        self._force_next_update = False
    def on_prediction_step( self : Dict , args : Any , state : Dict , control : int , eval_dataloader : List[Any]=None , **kwargs : Dict ) -> Dict:
        '''simple docstring'''
        if not has_length(eval_dataloader ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader ) )
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )
    def on_predict( self : Optional[Any] , args : Optional[int] , state : Tuple , control : Dict , **kwargs : Any ) -> Optional[int]:
        '''simple docstring'''
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log( self : Optional[int] , args : Union[str, Any] , state : Any , control : Optional[Any] , logs : Optional[int]=None , **kwargs : Optional[int] ) -> Tuple:
        '''simple docstring'''
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
            values['''Step'''] = state.global_step
            self.training_tracker.write_line(values )
    def on_evaluate( self : Optional[Any] , args : Dict , state : Any , control : Optional[int] , metrics : List[Any]=None , **kwargs : List[str] ) -> List[str]:
        '''simple docstring'''
        if self.training_tracker is not None:
            values = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    values['''Training Loss'''] = log['''loss''']
                    break
            if self.first_column == "Epoch":
                values['''Epoch'''] = int(state.epoch )
            else:
                values['''Step'''] = state.global_step
            metric_key_prefix = '''eval'''
            for k in metrics:
                if k.endswith('''_loss''' ):
                    metric_key_prefix = re.sub(r'''\_loss$''' , '''''' , k )
            _ = metrics.pop('''total_flos''' , None )
            _ = metrics.pop('''epoch''' , None )
            _ = metrics.pop(f"""{metric_key_prefix}_runtime""" , None )
            _ = metrics.pop(f"""{metric_key_prefix}_samples_per_second""" , None )
            _ = metrics.pop(f"""{metric_key_prefix}_steps_per_second""" , None )
            _ = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""" , None )
            for k, v in metrics.items():
                if k == f"""{metric_key_prefix}_loss""":
                    values['''Validation Loss'''] = v
                else:
                    splits = k.split('''_''' )
                    name = ''' '''.join([part.capitalize() for part in splits[1:]] )
                    values[name] = v
            self.training_tracker.write_line(values )
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end( self : int , args : Dict , state : Any , control : Any , **kwargs : List[str] ) -> Optional[Any]:
        '''simple docstring'''
        self.training_tracker.update(
            state.global_step , comment=f"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=True )
        self.training_tracker = None
| 287 | 0 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file( tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs ):
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='train', **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn ), desc=str(ds.len_file ), )
        max_lens = []
        for batch in dl:
            src_lens = batch['input_ids'].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch['labels'].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens ):
                    max_lens.append(max(src, tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='val', **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens, train_ds.len_file )
    pickle_save(val_lens, val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
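# Hedged CLI sketch (added): fire exposes the function arguments as command-line flags,
# so an invocation would look roughly like the following; the file name, model name and
# data path are placeholders.
#   python save_len_file.py t5-small path/to/data --consider_target True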
| 284 |
def a_ ( number : int ):
if number < 0:
raise ValueError('number must not be negative' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
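# Illustrative note (added): n & (n - 1) clears the lowest set bit, so it is zero exactly
# when n has at most one bit set. E.g. 8 = 0b1000 and 7 = 0b0111 give 8 & 7 == 0, while
# 6 = 0b110 and 5 = 0b101 give 6 & 5 == 0b100 != 0. The check above also returns True
# for 0, since 0 & -1 == 0.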
| 284 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_squeezebert_fast'''] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_squeezebert'''] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
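# With the lazy structure above, `from transformers.models.squeezebert import SqueezeBertModel`
# only triggers the heavy torch-backed import on first attribute access, keeping a bare
# `import transformers` cheap when optional backends are absent.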
| 369 |
def gcd(a: int, b: int) -> int:
    """Greatest common divisor via the Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Return the x with (a * x) % m == 1, via the extended Euclidean algorithm."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
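# Quick check: find_mod_inverse(7, 26) == 15, since 7 * 15 == 105 == 4 * 26 + 1.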
| 333 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def ta_base_tokenizer(self) -> ByT5Tokenizer:
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def snake_case__ ( self : Tuple,lowercase_ : List[str],lowercase_ : List[Any]=False,lowercase_ : Optional[int]=2_0,lowercase_ : Dict=5 )-> Tuple[str, list]:
'''simple docstring'''
A__ = []
for i in range(len(lowercase_ ) ):
try:
A__ = tokenizer.decode([i],clean_up_tokenization_spaces=lowercase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
A__ = list(filter(lambda lowercase_ : re.match(r'^[ a-zA-Z]+$',t[1] ),lowercase_ ) )
A__ = list(filter(lambda lowercase_ : [t[0]] == tokenizer.encode(t[1],add_special_tokens=lowercase_ ),lowercase_ ) )
if max_length is not None and len(lowercase_ ) > max_length:
A__ = toks[:max_length]
if min_length is not None and len(lowercase_ ) < min_length and len(lowercase_ ) > 0:
while len(lowercase_ ) < min_length:
A__ = toks + toks
# toks_str = [t[1] for t in toks]
A__ = [t[0] for t in toks]
# Ensure consistency
A__ = tokenizer.decode(lowercase_,clean_up_tokenization_spaces=lowercase_ )
if " " not in output_txt and len(lowercase_ ) > 1:
A__ = (
tokenizer.decode([toks_ids[0]],clean_up_tokenization_spaces=lowercase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:],clean_up_tokenization_spaces=lowercase_ )
)
if with_prefix_space:
A__ = ' ' + output_txt
A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
return output_txt, output_ids
def snake_case__ ( self : Optional[int] )-> Any:
'''simple docstring'''
A__ = self.ta_base_tokenizer
A__ = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
A__ = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'],batch_without_eos_added['input_ids'] )
def snake_case__ ( self : Union[str, Any] )-> Any:
'''simple docstring'''
A__ = self.ta_base_tokenizer
A__ = 'Unicode €.'
A__ = tokenizer(lowercase_ )
A__ = [8_8, 1_1_3, 1_0_8, 1_0_2, 1_1_4, 1_0_3, 1_0_4, 3_5, 2_2_9, 1_3_3, 1_7_5, 4_9, 1]
self.assertEqual(encoded['input_ids'],lowercase_ )
# decoding
A__ = tokenizer.decode(lowercase_ )
self.assertEqual(lowercase_,'Unicode €.</s>' )
A__ = tokenizer('e è é ê ë' )
A__ = [1_0_4, 3_5, 1_9_8, 1_7_1, 3_5, 1_9_8, 1_7_2, 3_5, 1_9_8, 1_7_3, 3_5, 1_9_8, 1_7_4, 1]
self.assertEqual(encoded['input_ids'],lowercase_ )
# decoding
A__ = tokenizer.decode(lowercase_ )
self.assertEqual(lowercase_,'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ),'e è é ê ë</s>' )
def snake_case__ ( self : str )-> Dict:
'''simple docstring'''
A__ = self.ta_base_tokenizer
A__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
A__ = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 1, 0]
# fmt: on
A__ = tokenizer(lowercase_,padding=lowercase_,return_tensors=lowercase_ )
self.assertIsInstance(lowercase_,lowercase_ )
if FRAMEWORK != "jax":
A__ = list(batch.input_ids.numpy()[0] )
else:
A__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase_,lowercase_ )
self.assertEqual((2, 3_7),batch.input_ids.shape )
self.assertEqual((2, 3_7),batch.attention_mask.shape )
def snake_case__ ( self : Optional[Any] )-> Tuple:
'''simple docstring'''
A__ = self.ta_base_tokenizer
A__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
A__ = tokenizer(lowercase_,padding=lowercase_,return_tensors=lowercase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids',lowercase_ )
self.assertIn('attention_mask',lowercase_ )
self.assertNotIn('decoder_input_ids',lowercase_ )
self.assertNotIn('decoder_attention_mask',lowercase_ )
def snake_case__ ( self : Optional[Any] )-> List[str]:
'''simple docstring'''
A__ = self.ta_base_tokenizer
A__ = [
'Summary of the text.',
'Another summary.',
]
A__ = tokenizer(
text_target=lowercase_,max_length=3_2,padding='max_length',truncation=lowercase_,return_tensors=lowercase_ )
self.assertEqual(3_2,targets['input_ids'].shape[1] )
def snake_case__ ( self : str )-> Optional[int]:
'''simple docstring'''
A__ = self.ta_base_tokenizer
A__ = ['A long paragraph for summarization. </s>']
A__ = ['Summary of the text. </s>']
# fmt: off
A__ = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 3_5, 1]
A__ = [8_6, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_2_4, 3_5, 1_1_4, 1_0_5, 3_5, 1_1_9, 1_0_7, 1_0_4, 3_5, 1_1_9, 1_0_4, 1_2_3, 1_1_9, 4_9, 3_5, 1]
# fmt: on
A__ = tokenizer(lowercase_,text_target=lowercase_ )
self.assertEqual(lowercase_,batch['input_ids'][0] )
self.assertEqual(lowercase_,batch['labels'][0] )
def snake_case__ ( self : Optional[int] )-> Optional[Any]:
'''simple docstring'''
A__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length,4_2 )
# Now let's start the test
A__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
A__ = tempfile.mkdtemp()
A__ = ' He is very happy, UNwant\u00E9d,running'
A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
tokenizer.save_pretrained(lowercase_ )
A__ = tokenizer.__class__.from_pretrained(lowercase_ )
A__ = after_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
shutil.rmtree(lowercase_ )
A__ = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
A__ = tempfile.mkdtemp()
A__ = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
A__ = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
tokenizer.save_pretrained(lowercase_ )
A__ = tokenizer.__class__.from_pretrained(lowercase_ )
A__ = after_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
self.assertIn('new_additional_special_token',after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length,4_2 )
A__ = tokenizer.__class__.from_pretrained(lowercase_,model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length,4_3 )
shutil.rmtree(lowercase_ )
def snake_case__ ( self : Tuple )-> Union[str, Any]:
'''simple docstring'''
A__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_,'special_tokens_map.json' ),encoding='utf-8' ) as json_file:
A__ = json.load(lowercase_ )
with open(os.path.join(lowercase_,'tokenizer_config.json' ),encoding='utf-8' ) as json_file:
A__ = json.load(lowercase_ )
A__ = [F'<extra_id_{i}>' for i in range(1_2_5 )]
A__ = added_tokens_extra_ids + [
'an_additional_special_token'
]
A__ = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowercase_,'special_tokens_map.json' ),'w',encoding='utf-8' ) as outfile:
json.dump(lowercase_,lowercase_ )
with open(os.path.join(lowercase_,'tokenizer_config.json' ),'w',encoding='utf-8' ) as outfile:
json.dump(lowercase_,lowercase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
A__ = tokenizer_class.from_pretrained(
lowercase_,)
self.assertIn(
'an_additional_special_token',tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'],tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ),)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
A__ = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token',lstrip=lowercase_ )]
A__ = tokenizer_class.from_pretrained(
lowercase_,additional_special_tokens=lowercase_,)
self.assertIn('a_new_additional_special_token',tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'],tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ),)
def snake_case__ ( self : List[str] )-> Any:
'''simple docstring'''
A__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase_ )
A__ = tokenizer_class.from_pretrained(lowercase_ )
self.assertTrue(tokenizer.decode([2_5_5] ) == '' )
def snake_case__ ( self : List[str] )-> Optional[Any]:
'''simple docstring'''
pass
def snake_case__ ( self : Optional[Any] )-> List[str]:
'''simple docstring'''
pass
def snake_case__ ( self : List[str] )-> Any:
'''simple docstring'''
pass
def snake_case__ ( self : Dict )-> Tuple:
'''simple docstring'''
pass
def snake_case__ ( self : Optional[Any] )-> Optional[Any]:
'''simple docstring'''
A__ = self.get_tokenizers(fast=lowercase_,do_lower_case=lowercase_ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
A__ = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
A__ = tokenizer.convert_tokens_to_string(lowercase_ )
self.assertIsInstance(lowercase_,lowercase_ )
def snake_case__ ( self : Tuple )-> str:
'''simple docstring'''
A__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
A__ = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
A__ = 0
A__ = tokenizer.convert_ids_to_tokens(
lowercase_,skip_special_tokens=lowercase_ )
for attr in attributes_list:
setattr(lowercase_,attr + '_id',lowercase_ )
self.assertEqual(getattr(lowercase_,lowercase_ ),lowercase_ )
self.assertEqual(getattr(lowercase_,attr + '_id' ),lowercase_ )
setattr(lowercase_,attr + '_id',lowercase_ )
self.assertEqual(getattr(lowercase_,lowercase_ ),lowercase_ )
self.assertEqual(getattr(lowercase_,attr + '_id' ),lowercase_ )
setattr(lowercase_,'additional_special_tokens_ids',[] )
self.assertListEqual(getattr(lowercase_,'additional_special_tokens' ),[] )
self.assertListEqual(getattr(lowercase_,'additional_special_tokens_ids' ),[] )
setattr(lowercase_,'additional_special_tokens_ids',[token_id_to_test_setters] )
self.assertListEqual(getattr(lowercase_,'additional_special_tokens' ),[token_to_test_setters] )
self.assertListEqual(getattr(lowercase_,'additional_special_tokens_ids' ),[token_id_to_test_setters] )
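# To run just this test module (path assumes the standard transformers test layout):
#   python -m pytest tests/models/byt5/test_tokenization_byt5.py -q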
| 7 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """
    Zero-shot audio classification pipeline: scores an audio clip against the
    candidate labels you provide, without task-specific fine-tuning.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
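# Minimal usage sketch (the checkpoint name is illustrative of a CLAP-style
# audio-text model; any checkpoint wired to this task works the same way):
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])
#   # -> list of {"score", "label"} dicts sorted by descending score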
| 277 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
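# Standalone usage sketch of the processor under test (a video is a list of frames):
#   processor = VivitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
#   pixel_values = processor(video, return_tensors="pt").pixel_values  # (1, num_frames, 3, 18, 18)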
| 364 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        # window/stride are given in milliseconds, convert to samples
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extract log-mel filterbank (MFSC) features for a single, unbatched waveform."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # mean/variance statistics are computed over the unpadded prefix only
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
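# Usage sketch (values illustrative):
#   fe = MCTCTFeatureExtractor(feature_size=80, sampling_rate=16000)
#   one_second = np.zeros(16000, dtype=np.float32)
#   feats = fe(one_second, sampling_rate=16000, padding=True, return_tensors="np")
#   feats["input_features"].shape  # (1, num_frames, 80)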
| 204 | 0 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """
    Turns raw HTML strings into lists of text nodes and their xpaths, the inputs
    expected by MarkupLM-style tokenizers. Requires the `bs4` (BeautifulSoup) backend.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag)
                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
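    # e.g. construct_xpath(["html", "body", "div", "li"], [0, 0, 0, 2]) returns "/html/body/div/li[2]"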
    def __call__(self, html_strings) -> BatchFeature:
        """Extract nodes and xpaths from one HTML string or a list of HTML strings."""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
| 6 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = " \"\"\"\n    Output class for the scheduler's step function output.\n\n    Args:\n        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n            denoising loop.\n        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n            The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n            `pred_original_sample` can be used to preview progress or for guidance.\n    \"\"\"\n\n    prev_sample: torch.FloatTensor\n    pred_original_sample: Optional[torch.FloatTensor] = None\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # target version assumed to be PY37, matching black's API; the original pins a 3.x target
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 231 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
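# e.g. rename_key(state_dict, "norm.weight", "layernorm.weight") moves that tensor to its new key.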
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
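# Example invocation (the dump folder is illustrative; --checkpoint_url defaults to the
# original DiT-base weights declared below):
#   python <this_script>.py --pytorch_dump_folder_path ./dit-base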
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 351 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """
    Return the prime factors of n, in ascending order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 306 | 0 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[int, int]): [min, max] range to sample the target short edge from.
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
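# Usage sketch (the cfg object and image paths are illustrative; cfg comes from the
# project's own Config utility):
#   preprocess = Preprocess(cfg)
#   images, sizes, scales_yx = preprocess(["photo.jpg", "other.jpg"])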
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
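# _scale_box maps boxes predicted on the resized tensor back into original-image
# coordinates via the per-image (scale_y, scale_x) that Preprocess.__call__ returns;
# _clip_box then clamps the (x0, y0, x1, y1) coordinates into [0, w] x [0, h].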
| 95 |
def mf_knapsack(i, wt, val, j):
    """
    Memoization-based (top-down) 0/1 knapsack: `f` is a global table where
    f[i][j] caches the best value using the first i items with capacity j.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns (best value, full dp table)."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """
    Solves the knapsack problem and also returns one optimal subset of
    (1-based) item indices achieving the optimal value.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 277 | 0 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Count the values t <= t_limit for which the number of distinct hollow
    square laminae buildable with exactly t tiles is between 1 and n_limit.
    """
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must share parity with the outer square so the border is uniform
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
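# Worked example from the problem statement: t = 32 tiles can be laid as exactly two
# distinct laminae (9x9 outer with a 7x7 hole, or 6x6 outer with a 2x2 hole),
# so t = 32 is one of the values counted for L(t) = 2.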
if __name__ == "__main__":
print(F'''{solution() = }''')
| 363 |
def solution(length: int = 50) -> int:
    """
    Count the ways a row of `length` units can be filled with black squares and
    red blocks of minimum length 3, any two blocks separated by at least one square.
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
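# Sanity check from Project Euler 114's statement: a row of length 7 admits exactly
# 17 fillings, so solution(7) should return 17.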
if __name__ == "__main__":
print(F'''{solution() = }''')
| 231 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
def __magic_name__ ( self : List[str] ) -> int:
return self.encoder_attention_heads
@property
def __magic_name__ ( self : Dict ) -> int:
return self.d_model
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any =copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
SCREAMING_SNAKE_CASE__ : int =self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.__class__.model_type
return output | 152 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            index = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
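# Condensed FaissIndex usage, mirroring the inner-product assertions above:
#
#     index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
#     index.add_vectors(np.eye(5, dtype=np.float32))
#     scores, indices = index.search(np.eye(5, dtype=np.float32)[1])  # indices[0] == 1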
| 19 | 0 |
"""simple docstring"""
from math import factorial, radians
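# Maclaurin expansion evaluated below: sin(x) = x - x**3/3! + x**5/5! - x**7/7! + ...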
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 336 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 336 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
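# _LazyModule swaps itself into sys.modules so the torch-backed modeling
# imports above only execute when one of the exported names is first accessed.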
| 44 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
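# TFXGLMModelTester builds a tiny random config and inputs for the fast unit
# tests; the generation tests further down exercise the real
# facebook/xglm-564M checkpoint and are gated behind @slow.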
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict


@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 71 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    """Image processor for PoolFormer: resize (with optional crop fraction), center crop, rescale, normalize."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            # Resize larger than the target by 1 / crop_pct, so the later center crop lands on the target size.
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
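# Usage sketch (BaseImageProcessor.__call__ forwards to preprocess above):
#
#     processor = PoolFormerImageProcessor()
#     batch = processor(images=[pil_image], return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224) with the defaults above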
| 160 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
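# pad_across_processes pads each process' tensor to the largest size across
# processes along dim 0 (num_processes + 1 rows here, since process i holds
# i + 2 rows); pad_first=True places the zero padding before the data, which
# is what the `index` arithmetic above accounts for.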
| 160 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
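# RoCBert supplements token embeddings with pronunciation and shape (glyph)
# embeddings to resist adversarial typos; the enable_* flags above switch the
# extra channels on, and concat_input selects whether those channels are
# concatenated with the word embeddings before the encoder.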
| 43 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
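# The suite below covers four heads on the same GPT backbone: the bare model,
# the LM head, the double-heads (LM + multiple choice) variant, and sequence
# classification.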
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 162 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
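# UMT5 reuses the T5 architecture, so the tester below drives the UMT5 model
# classes with a T5Config, while the integration test loads google/umt5-small.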
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[int] , UpperCamelCase : int , UpperCamelCase : Dict=99 , UpperCamelCase : Dict=13 , UpperCamelCase : List[str]=7 , UpperCamelCase : List[str]=9 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=False , UpperCamelCase : Dict=32 , UpperCamelCase : Dict=5 , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : int=37 , UpperCamelCase : Any=8 , UpperCamelCase : List[str]=0.1 , UpperCamelCase : int=0.002 , UpperCamelCase : List[Any]=1 , UpperCamelCase : List[str]=0 , UpperCamelCase : Optional[Any]=0 , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Union[str, Any]=None , ):
'''simple docstring'''
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : Union[str, Any] = batch_size
__UpperCAmelCase : Tuple = encoder_seq_length
__UpperCAmelCase : Dict = decoder_seq_length
# For common tests
__UpperCAmelCase : Union[str, Any] = self.decoder_seq_length
__UpperCAmelCase : Any = is_training
__UpperCAmelCase : Optional[int] = use_attention_mask
__UpperCAmelCase : str = use_labels
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : List[str] = num_hidden_layers
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Optional[Any] = d_ff
__UpperCAmelCase : Tuple = relative_attention_num_buckets
__UpperCAmelCase : Optional[Any] = dropout_rate
__UpperCAmelCase : Optional[Any] = initializer_factor
__UpperCAmelCase : List[Any] = eos_token_id
__UpperCAmelCase : Dict = pad_token_id
__UpperCAmelCase : str = decoder_start_token_id
__UpperCAmelCase : str = None
__UpperCAmelCase : List[Any] = decoder_layers
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return TaConfig.from_pretrained("""google/umt5-base""" )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Any=None , ):
'''simple docstring'''
if attention_mask is None:
__UpperCAmelCase : Tuple = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__UpperCAmelCase : int = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__UpperCAmelCase : Union[str, Any] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCamelCase )
if decoder_head_mask is None:
__UpperCAmelCase : Tuple = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase )
if cross_attn_head_mask is None:
__UpperCAmelCase : Optional[Any] = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
__UpperCAmelCase : Dict = input_ids.clamp(self.pad_token_id + 1 )
__UpperCAmelCase : List[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
__UpperCAmelCase : List[Any] = self.get_config()
__UpperCAmelCase : str = config.num_attention_heads
__UpperCAmelCase : str = self.prepare_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return config, input_dict
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ):
'''simple docstring'''
__UpperCAmelCase : str = UMTaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(
input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase , attention_mask=UpperCamelCase , decoder_attention_mask=UpperCamelCase , )
__UpperCAmelCase : List[str] = model(input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase )
__UpperCAmelCase : Optional[int] = result.last_hidden_state
__UpperCAmelCase : Tuple = result.past_key_values
__UpperCAmelCase : Optional[int] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowerCamelCase__ ( self : Any , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : Dict , ):
'''simple docstring'''
__UpperCAmelCase : Dict = UMTaModel(config=UpperCamelCase ).get_decoder().to(UpperCamelCase ).eval()
# first forward pass
__UpperCAmelCase : Tuple = model(UpperCamelCase , use_cache=UpperCamelCase )
__UpperCAmelCase : List[str] = model(UpperCamelCase )
__UpperCAmelCase : List[Any] = model(UpperCamelCase , use_cache=UpperCamelCase )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 )
__UpperCAmelCase ,__UpperCAmelCase : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
__UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCAmelCase : Union[str, Any] = model(UpperCamelCase )["""last_hidden_state"""]
__UpperCAmelCase : Dict = model(UpperCamelCase , past_key_values=UpperCamelCase )["""last_hidden_state"""]
# select random slice
__UpperCAmelCase : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCAmelCase : List[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , ):
'''simple docstring'''
__UpperCAmelCase : List[str] = UMTaModel(config=UpperCamelCase ).to(UpperCamelCase ).half().eval()
__UpperCAmelCase : Dict = model(**UpperCamelCase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(UpperCamelCase ).any().item() )
@require_torch
class lowerCamelCase__ ( A , A , A , unittest.TestCase ):
"""simple docstring"""
__a = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
__a = (UMTaForConditionalGeneration,) if is_torch_available() else ()
__a = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
__a = True
__a = False
__a = False
__a = True
__a = True
# The small UMT5 model needs higher percentages for CPU/MP tests
__a = [0.8, 0.9]
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : str = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
__UpperCAmelCase : List[str] = UMTaModel(config_and_inputs[0] ).to(UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=UpperCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCamelCase )
    def test_generate_with_head_masking( self ):
        '''simple docstring'''
        attention_names = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config ).eval()
        model.to(torch_device )
        head_masking = {
            """head_mask""": torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
            """decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
            """cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
        }
        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["""decoder_head_mask"""] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device )
            out = model.generate(
                config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
    def test_disk_offload( self ):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
    def test_small_integration_test( self ):
        '''simple docstring'''
        model = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=False , legacy=False )
        input_text = [
            """Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
            """No se como puedo <extra_id_0>.""",
            """This is the reason why we <extra_id_0> them.""",
            """The <extra_id_0> walks in <extra_id_1>, seats""",
            """A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
        ]
        input_ids = tokenizer(input_text , return_tensors="""pt""" , padding=True ).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
[
            [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 826, 321, 671, 25_922, 256_299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333, 61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296, 274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling , EXPECTED_FILLING )
| 320 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
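
# --- A minimal usage sketch (added for illustration, not part of the original module).
# The try/except guards above mean these names always resolve: to real schedulers when
# the backend is installed, or to dummy objects that raise a helpful error on use.
def _scheduler_usage_sketch():
    scheduler = DPMSolverMultistepScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=25)
    return scheduler.timesteps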
| 320 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        self.size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self ):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
__a = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self ):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''apply_ocr''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def test_layoutlm_integration_test( self ):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
        image = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
        encoding = image_processing(image , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_snake_case = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
_snake_case = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _lowerCamelCase )
self.assertListEqual(encoding.boxes , _lowerCamelCase )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
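
    # A minimal usage sketch (not part of the test suite; assumes Tesseract/pytesseract
    # are installed): with apply_ocr=True the processor returns pixel values plus the
    # OCR'd words and their normalized bounding boxes.
    def _usage_sketch( self ):
        image_processor = LayoutLMvaImageProcessor(apply_ocr=True )
        image = Image.new('''RGB''' , (640, 480) , color='''white''' )  # stand-in document image
        encoding = image_processor(image , return_tensors='''pt''' )
        return encoding.pixel_values, encoding.words, encoding.boxes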
| 288 |
"""simple docstring"""
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10):
        raise ValueError(
            '''Invalid input\n'''
            '''needed_sum must be between 1 and 1000, power between 2 and 10.''' )

    return backtrack(needed_sum, power, 1, 0, 0 )[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
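
    # Quick example (added for illustration): count the ways to write 100 as a sum of
    # powers of distinct natural numbers raised to the power 2.
    print(solve(100, 2))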
| 288 | 1 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_deterministic():
    """simple docstring"""
    no_aggregation = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=['rouge2', 'rougeL'] )
    assert isinstance(no_aggregation , defaultdict )
    no_aggregation_just_ra = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=['rouge2'] )
    assert (
        pd.DataFrame(no_aggregation['rouge2'] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra['rouge2'] ).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    """simple docstring"""
    k = 'rougeLsum'
    score = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=[k] )[k]
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=[k] )[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    """simple docstring"""
    k = ['rouge1', 'rouge2', 'rougeL']
    score_sep = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=k )
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=k )
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    """simple docstring"""
    pred = [
'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
]
    tgt = [
'Margot Frank, died in 1945, a month earlier than previously thought.',
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
' the final seconds on board Flight 9525.',
]
    assert calculate_rouge(pred , tgt , newline_sep=True ) == calculate_rouge(pred , tgt , newline_sep=False )
def test_pegasus_newline():
    """simple docstring"""
    pred = [
'" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
]
    tgt = [
' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
]
    prev_score = calculate_rouge(pred , tgt , rouge_keys=['rougeLsum'] , newline_sep=False )['rougeLsum']
    new_score = calculate_rouge(pred , tgt , rouge_keys=['rougeLsum'] )['rougeLsum']
    assert new_score > prev_score
def test_rouge_cli():
    """simple docstring"""
    data_dir = Path('examples/seq2seq/test_data/wmt_en_ro' )
    metrics = calculate_rouge_path(data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) )
    assert isinstance(metrics , dict )
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) , bootstrap_aggregation=False )
    assert isinstance(metrics_default_dict , defaultdict )
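

# A minimal usage sketch (added for illustration, not one of the original tests):
# aggregated scoring returns one entry per requested ROUGE key.
def _usage_sketch():
    scores = calculate_rouge(PRED, TGT, rouge_keys=['rouge1', 'rougeLsum'])
    return scores['rouge1'], scores['rougeLsum']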
| 359 | """simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin ):
    tokenizer_class = 'AutoTokenizer'
    attributes = ['tokenizer']

    preset_shape = {
        'semantic_prompt': 1,
        'coarse_prompt': 2,
        'fine_prompt': 2,
    }

    def __init__( self ,tokenizer ,speaker_embeddings=None ):
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
@classmethod
    def from_pretrained( cls ,pretrained_processor_name_or_path ,speaker_embeddings_dict_path="speaker_embeddings_path.json" ,**kwargs ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path ,speaker_embeddings_dict_path ,subfolder=kwargs.pop('subfolder' ,None ) ,cache_dir=kwargs.pop('cache_dir' ,None ) ,force_download=kwargs.pop('force_download' ,False ) ,proxies=kwargs.pop('proxies' ,None ) ,resume_download=kwargs.pop('resume_download' ,False ) ,local_files_only=kwargs.pop('local_files_only' ,False ) ,use_auth_token=kwargs.pop('use_auth_token' ,None ) ,revision=kwargs.pop('revision' ,None ) ,)
            if speaker_embeddings_path is None:
                logger.warning(
                    f'''`{os.path.join(pretrained_processor_name_or_path ,speaker_embeddings_dict_path )}` does not exist
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path ) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json )
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path ,**kwargs )
        return cls(tokenizer=tokenizer ,speaker_embeddings=speaker_embeddings )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase="speaker_embeddings_path.json" ,__UpperCAmelCase="speaker_embeddings" ,__UpperCAmelCase = False ,**__UpperCAmelCase ,) -> Tuple:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__UpperCAmelCase ,__UpperCAmelCase ,'v2' ) ,exist_ok=__UpperCAmelCase )
A__ = {}
A__ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A__ = self._load_voice_preset(__UpperCAmelCase )
A__ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] ,__UpperCAmelCase ,f'''{prompt_key}_{key}''' ) ,voice_preset[key] ,allow_pickle=__UpperCAmelCase ,)
A__ = os.path.join(__UpperCAmelCase ,f'''{prompt_key}_{key}.npy''' )
A__ = tmp_dict
with open(os.path.join(__UpperCAmelCase ,__UpperCAmelCase ) ,'w' ) as fp:
json.dump(__UpperCAmelCase ,__UpperCAmelCase )
super().save_pretrained(__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase )
    def _load_voice_preset( self ,voice_preset = None ,**kwargs ):
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
            path = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] ,subfolder=kwargs.pop('subfolder' ,None ) ,cache_dir=kwargs.pop('cache_dir' ,None ) ,force_download=kwargs.pop('force_download' ,False ) ,proxies=kwargs.pop('proxies' ,None ) ,resume_download=kwargs.pop('resume_download' ,False ) ,local_files_only=kwargs.pop('local_files_only' ,False ) ,use_auth_token=kwargs.pop('use_auth_token' ,None ) ,revision=kwargs.pop('revision' ,None ) ,)
            if path is None:
                raise ValueError(
                    f'''`{os.path.join(self.speaker_embeddings.get("repo_or_path" ,"/" ) ,voice_preset_paths[key] )}` does not exist
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )
            voice_preset_dict[key] = np.load(path )
        return voice_preset_dict
    def _validate_voice_preset_dict( self ,voice_preset = None ):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
            if not isinstance(voice_preset[key] ,np.ndarray ):
                raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
    def __call__( self ,text=None ,voice_preset=None ,return_tensors="pt" ,max_length=2_56 ,add_special_tokens=False ,return_attention_mask=True ,return_token_type_ids=False ,**kwargs ,):
        if voice_preset is not None and not isinstance(voice_preset ,dict ):
            if (
                isinstance(voice_preset ,str )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset )
            else:
                if isinstance(voice_preset ,str ) and not voice_preset.endswith('.npz' ):
                    voice_preset = voice_preset + '.npz'
                voice_preset = np.load(voice_preset )
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset ,**kwargs )
            voice_preset = BatchFeature(data=voice_preset ,tensor_type=return_tensors )
        encoded_text = self.tokenizer(
            text ,return_tensors=return_tensors ,padding='max_length' ,max_length=max_length ,return_attention_mask=return_attention_mask ,return_token_type_ids=return_token_type_ids ,add_special_tokens=add_special_tokens ,**kwargs ,)
        if voice_preset is not None:
            encoded_text['history_prompt'] = voice_preset
        return encoded_text
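

# A minimal usage sketch (added for illustration; the checkpoint and preset names come
# from the public Bark release, not from this file).
def _usage_sketch():
    processor = BarkProcessor.from_pretrained('suno/bark-small')
    return processor('Hello, my dog is cute', voice_preset='v2/en_speaker_6')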
| 154 | 0 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : List[str] = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class XLMProphetNetConfig(PretrainedConfig ):
    model_type = '''xlm-prophetnet'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''num_encoder_attention_heads''',
    }
    def __init__( self , activation_dropout: Optional[float] = 0.1 , activation_function: Optional[Union[str, Callable]] = "gelu" , vocab_size: Optional[int] = 3_05_22 , hidden_size: Optional[int] = 10_24 , encoder_ffn_dim: Optional[int] = 40_96 , num_encoder_layers: Optional[int] = 12 , num_encoder_attention_heads: Optional[int] = 16 , decoder_ffn_dim: Optional[int] = 40_96 , num_decoder_layers: Optional[int] = 12 , num_decoder_attention_heads: Optional[int] = 16 , attention_dropout: Optional[float] = 0.1 , dropout: Optional[float] = 0.1 , max_position_embeddings: Optional[int] = 5_12 , init_std: Optional[float] = 0.02 , is_encoder_decoder: Optional[bool] = True , add_cross_attention: Optional[bool] = True , decoder_start_token_id: Optional[int] = 0 , ngram: Optional[int] = 2 , num_buckets: Optional[int] = 32 , relative_max_distance: Optional[int] = 1_28 , disable_ngram_loss: Optional[bool] = False , eps: Optional[float] = 0.0 , use_cache: Optional[bool] = True , pad_token_id: Optional[int] = 0 , bos_token_id: Optional[int] = 1 , eos_token_id: Optional[int] = 2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , add_cross_attention=add_cross_attention , decoder_start_token_id=decoder_start_token_id , **kwargs , )
    @property
    def num_hidden_layers( self ) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers( self , value ):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`." )
| 236 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
    config = DPTConfig(embedding_type="hybrid" )
    expected_shape = (1, 384, 384)  # default, overridden per checkpoint below
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset" ) ), "r" ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict ):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None )
def rename_key( name ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder" )
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings" )
    if "patch_embed" in name:
        name = name.replace("patch_embed", "" )
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings" )
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense" )
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection" )
    if "blocks" in name:
        name = name.replace("blocks", "layer" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense" )
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before" )
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after" )
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head" )
    if "scratch" in name:
        name = name.replace("scratch", "neck" )
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0" )
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1" )
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2" )
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3" )
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"refinenet{layer_idx}", F"fusion_stage.layers.{abs(layer_idx-4 )}" )
    if "out_conv" in name:
        name = name.replace("out_conv", "projection" )
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1" )
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2" )
    if "conv1" in name:
        name = name.replace("conv1", "convolution1" )
    if "conv2" in name:
        name = name.replace("conv2", "convolution2" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize" )
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt" )
    if "bn" in name:
        name = name.replace("bn", "batch_norm" )
    if "head" in name:
        name = name.replace("head", "head.head" )
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm" )
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head" )
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder" )
    if ".." in name:
        name = name.replace("..", "." )
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution" )
    if "blocks" in name:
        name = name.replace("blocks", "layers" )
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv" )
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers" )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit" )
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution" )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm" )
    return name
def read_in_q_k_v( state_dict, config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction ):
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if "ade" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt" )
    # forward pass
    outputs = model(**encoding ).logits if "ade" in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False, )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"Saving model to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"Saving image processor to {pytorch_dump_folder_path}" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas" )
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
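
# Example invocation (dump path assumed for illustration; the checkpoint URL is the
# parser default above):
# python convert_dpt_hybrid_to_pytorch.py \
#     --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#     --pytorch_dump_folder_path ./dpt-dump \
#     --show_prediction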
| 236 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_upernet'] = [
        'UperNetForSemanticSegmentation',
        'UperNetPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
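
# Usage sketch (added for illustration): the _LazyModule above defers the heavy torch
# import until first attribute access, e.g.
#   from transformers import UperNetConfig, UperNetForSemanticSegmentation
#   model = UperNetForSemanticSegmentation(UperNetConfig())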
| 369 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin , unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self ) -> None:
        """simple docstring"""
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB ,keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self ):
        """simple docstring"""
        token = '''[PAD]'''
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) ,token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) ,token )
    def test_get_vocab(self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] ,'''[PAD]''' )
        self.assertEqual(vocab_keys[1] ,'''[CLS]''' )
        self.assertEqual(vocab_keys[-1] ,'''j''' )
        self.assertEqual(len(vocab_keys ) ,10_12 )
    def test_vocab_size(self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size ,10_12 )
    def test_full_tokenizer(self ):
        """simple docstring"""
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB ,keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,)
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] ,)
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] ,)
@cached_property
    def big_tokenizer(self ):
        """simple docstring"""
        return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
    def test_tokenization_base_easy_symbols(self ):
        """simple docstring"""
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [3_53_89, 66_72, 49, 2]
        self.assertListEqual(original_tokenizer_encodings ,self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration(self ):
        """simple docstring"""
        # fmt: off
lowerCAmelCase__ : Any = {'''input_ids''': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase ,model_name='''microsoft/xprophetnet-large-wiki100-cased''' ,revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' ,)
| 94 | 0 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class A_ ( unittest.TestCase ):
@property
    def gpu_provider(self ):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference(self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy')
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=None ,feature_extractor=None ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A red cat sitting on a park bench'
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt ,image=init_image ,mask_image=mask_image ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=1_5 ,generator=generator ,output_type='np' ,)
        image = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image).max() < 1E-2
| 73 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url( repo_id: str , path: str , revision: Optional[str] = None ) -> str:
    if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type='dataset' , revision=revision )
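

# A minimal usage sketch (repo and file names assumed for illustration):
# url = hf_hub_url("user/my-dataset", "data/train.csv", revision="main")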
| 73 | 1 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHAaaa :
"""simple docstring"""
    def __init__( self , data: bytes ):
        self.data = data
# Initialize hash values
        self.hashes = [
0X6A_09_E6_67,
0XBB_67_AE_85,
0X3C_6E_F3_72,
0XA5_4F_F5_3A,
0X51_0E_52_7F,
0X9B_05_68_8C,
0X1F_83_D9_AB,
0X5B_E0_CD_19,
]
# Initialize round constants
        self.round_constants = [
0X42_8A_2F_98,
0X71_37_44_91,
0XB5_C0_FB_CF,
0XE9_B5_DB_A5,
0X39_56_C2_5B,
0X59_F1_11_F1,
0X92_3F_82_A4,
0XAB_1C_5E_D5,
0XD8_07_AA_98,
0X12_83_5B_01,
0X24_31_85_BE,
0X55_0C_7D_C3,
0X72_BE_5D_74,
0X80_DE_B1_FE,
0X9B_DC_06_A7,
0XC1_9B_F1_74,
0XE4_9B_69_C1,
0XEF_BE_47_86,
0X0F_C1_9D_C6,
0X24_0C_A1_CC,
0X2D_E9_2C_6F,
0X4A_74_84_AA,
0X5C_B0_A9_DC,
0X76_F9_88_DA,
0X98_3E_51_52,
0XA8_31_C6_6D,
0XB0_03_27_C8,
0XBF_59_7F_C7,
0XC6_E0_0B_F3,
0XD5_A7_91_47,
0X06_CA_63_51,
0X14_29_29_67,
0X27_B7_0A_85,
0X2E_1B_21_38,
0X4D_2C_6D_FC,
0X53_38_0D_13,
0X65_0A_73_54,
0X76_6A_0A_BB,
0X81_C2_C9_2E,
0X92_72_2C_85,
0XA2_BF_E8_A1,
0XA8_1A_66_4B,
0XC2_4B_8B_70,
0XC7_6C_51_A3,
0XD1_92_E8_19,
0XD6_99_06_24,
0XF4_0E_35_85,
0X10_6A_A0_70,
0X19_A4_C1_16,
0X1E_37_6C_08,
0X27_48_77_4C,
0X34_B0_BC_B5,
0X39_1C_0C_B3,
0X4E_D8_AA_4A,
0X5B_9C_CA_4F,
0X68_2E_6F_F3,
0X74_8F_82_EE,
0X78_A5_63_6F,
0X84_C8_78_14,
0X8C_C7_02_08,
0X90_BE_FF_FA,
0XA4_50_6C_EB,
0XBE_F9_A3_F7,
0XC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data):
        # Pad with 0x80, zeros, and the 64-bit message length so the total is a multiple of 64 bytes
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self):
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array (message schedule)
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value, rotations):
        # Right-rotate a 32-bit integer
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    """Check our implementation against hashlib's reference output."""

    def test_match_hashes(self):
        import hashlib

        data = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(data).hash, hashlib.sha256(data).hexdigest())
def main() -> None:
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
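# Quick self-check sketch (not part of the original module): hash a sample byte string with
# the pure-Python SHA256 above and confirm it matches hashlib's reference implementation.
#
#   >>> import hashlib
#   >>> msg = b"abc"
#   >>> SHA256(msg).hash == hashlib.sha256(msg).hexdigest()
#   True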
| 355 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
| 346 | 0 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
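    # Minimal single-device usage sketch for GradientAccumulator (not part of the original tests).
    # The idea: feed per-step gradients into the accumulator, apply them every `accum_steps`
    # steps, then reset. `compute_loss`, `dataset`, and `accum_steps` are assumed user-defined.
    #
    # accumulator = GradientAccumulator()
    # for step, batch in enumerate(dataset):
    #     with tf.GradientTape() as tape:
    #         loss = compute_loss(model, batch)
    #     accumulator(tape.gradient(loss, model.trainable_variables))
    #     if (step + 1) % accum_steps == 0:
    #         optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
    #         accumulator.reset()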
| 246 |
"""simple docstring"""
import random
class _UpperCAmelCase :
@staticmethod
def __snake_case ( _A ) -> tuple[list[int], list[int]]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = [ord(_A ) for i in text]
_UpperCAmelCase : str = []
_UpperCAmelCase : int = []
for i in plain:
_UpperCAmelCase : List[str] = random.randint(1 , 3_00 )
_UpperCAmelCase : Any = (i + k) * k
cipher.append(_A )
key.append(_A )
return cipher, key
@staticmethod
def __snake_case ( _A , _A ) -> str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = []
for i in range(len(_A ) ):
_UpperCAmelCase : List[Any] = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(_A ) )
return "".join(_A )
if __name__ == "__main__":
lowerCamelCase__ , lowerCamelCase__ : List[Any] = Onepad().encrypt('''Hello''')
print(c, k)
print(Onepad().decrypt(c, k))
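# Round-trip property sketch (not part of the original snippet): for any string s,
# decrypt(*encrypt(s)) returns s, since ((i + k) * k - k**2) / k == i.
# assert Onepad().decrypt(*Onepad().encrypt("any text")) == "any text"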
| 246 | 1 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
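    # Illustrative usage sketch (not part of the original tests): running the pretrained Flax
    # checkpoint on a real tokenized sentence. The tokenizer import is an assumption here.
    #
    # from transformers import BertTokenizerFast
    # tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased")
    # inputs = tokenizer("Hello world", return_tensors="np")
    # outputs = FlaxBertModel.from_pretrained("bert-base-cased")(**inputs)
    # print(outputs.last_hidden_state.shape)  # (1, seq_len, hidden_size)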
| 42 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
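# Usage note (illustrative, not part of the original module): with the `_LazyModule` pattern
# above, submodules are only imported when one of their exported names is first accessed, e.g.
#
# import transformers.models.xlnet as xlnet_module
# config = xlnet_module.XLNetConfig()  # triggers the import of configuration_xlnet only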
| 42 | 1 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )
    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
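    # Shape sketch (illustrative, not part of the original test): a single forward pass with
    # random inputs, showing the (batch, seq, dim) layout the autoregressive loop above relies on.
    #
    # batch, seq = 1, 4
    # out = model(
    #     states=torch.randn(batch, seq, config.state_dim, device=torch_device),
    #     actions=torch.randn(batch, seq, config.act_dim, device=torch_device),
    #     rewards=torch.randn(batch, seq, 1, device=torch_device),
    #     returns_to_go=torch.randn(batch, seq, 1, device=torch_device),
    #     timesteps=torch.arange(seq, device=torch_device).reshape(1, seq),
    #     attention_mask=torch.ones(batch, seq, device=torch_device),
    # )
    # assert out.action_preds.shape == (batch, seq, config.act_dim)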
| 346 |
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase="train" ):
return calculate_hypothesis_value(lowerCamelCase, lowerCamelCase ) - output(
lowerCamelCase, lowerCamelCase )
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :str = 0
for i in range(len(lowerCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase=m ):
lowercase :Union[str, Any] = 0
for i in range(lowerCamelCase ):
if index == -1:
summation_value += _error(lowerCamelCase )
else:
summation_value += _error(lowerCamelCase ) * train_data[i][0][index]
return summation_value
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :int = summation_of_cost_derivative(lowerCamelCase, lowerCamelCase ) / m
return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
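# Cross-check sketch (not part of the original snippet): the same hypothesis can be fit in
# closed form with numpy's least squares, which is a handy sanity check on the learned
# parameter_vector (bias first, then one weight per feature).
#
# X = numpy.array([[1, *x] for x, _ in train_data], dtype=float)  # prepend bias input 1
# y = numpy.array([out for _, out in train_data], dtype=float)
# theta, *_ = numpy.linalg.lstsq(X, y, rcond=None)
# print(theta)  # compare against parameter_vector after run_gradient_descent()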
| 236 | 0 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def UpperCAmelCase(self : str ) -> Tuple:
pass
def UpperCAmelCase(self : Union[str, Any] ) -> List[str]:
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 368 |
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 137 | 0 |
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_projs = []
        self.out_layers = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                projection = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(projection)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
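# Illustrative usage sketch (not part of the original module). The shapes below are assumptions:
# hidden states of shape (seq_len, batch, d_proj) and integer targets of shape (seq_len, batch).
#
# layer = TFAdaptiveSoftmaxMask(vocab_size=1000, d_embed=64, d_proj=64, cutoffs=[100, 500], div_val=2)
# hidden = tf.random.normal((16, 2, 64))
# target = tf.random.uniform((16, 2), maxval=1000, dtype=tf.int32)
# log_probs = layer(hidden, target, return_mean=True)  # also records the NLL via add_loss()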
| 169 |
def max_subarray_product(numbers: list[int]) -> int:
    """Return the largest product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
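if __name__ == "__main__":
    # Illustrative checks (not part of the original function); values verified by hand.
    assert max_subarray_product([2, 3, -2, 4]) == 6  # subarray [2, 3]
    assert max_subarray_product([-2, 0, -1]) == 0
    assert max_subarray_product([-2, -3, -4]) == 12  # subarray [-3, -4]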
| 169 | 1 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 296 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
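    # Illustrative usage sketch (not part of the original tests): extracting log-mel features
    # from one second of random audio at Whisper's native 16 kHz sampling rate.
    #
    # feature_extractor = WhisperFeatureExtractor()
    # waveform = np.random.randn(16_000).astype(np.float32)
    # features = feature_extractor(waveform, sampling_rate=16_000, return_tensors="np").input_features
    # features.shape == (1, 80, 3000)  # 80 mel bins, 30 s of frames (shorter audio is padded)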
| 296 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
parser.add_argument(
"--pretrained_model_config", type=__lowerCamelCase, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!", )
parser.add_argument(
"--tokenizer", type=__lowerCamelCase, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", )
parser.add_argument(
"--per_replica_batch_size", type=__lowerCamelCase, default=8, help="Batch size per TPU core.", )
parser.add_argument(
"--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", )
parser.add_argument(
"--tpu_name", type=__lowerCamelCase, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local", )
parser.add_argument(
"--tpu_zone", type=__lowerCamelCase, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", )
parser.add_argument(
"--gcp_project", type=__lowerCamelCase, help="Google cloud project name. Only used for non-Colab TPU nodes." )
parser.add_argument(
"--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", )
parser.add_argument(
"--train_dataset", type=__lowerCamelCase, help="Path to training dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket.", )
parser.add_argument(
"--shuffle_buffer_size", type=__lowerCamelCase, default=2**18, help="Size of the shuffle buffer (in samples)", )
parser.add_argument(
"--eval_dataset", type=__lowerCamelCase, help="Path to evaluation dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket.", )
parser.add_argument(
"--num_epochs", type=__lowerCamelCase, default=1, help="Number of epochs to train for.", )
parser.add_argument(
"--learning_rate", type=__lowerCamelCase, default=1E-4, help="Learning rate to use for training.", )
parser.add_argument(
"--weight_decay_rate", type=__lowerCamelCase, default=1E-3, help="Weight decay rate to use for training.", )
parser.add_argument(
"--max_length", type=__lowerCamelCase, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", )
parser.add_argument(
"--mlm_probability", type=__lowerCamelCase, default=0.15, help="Fraction of tokens to mask during training.", )
parser.add_argument("--output_dir", type=__lowerCamelCase, required=__lowerCamelCase, help="Path to save model checkpoints to." )
parser.add_argument("--hub_model_id", type=__lowerCamelCase, help="Model ID to upload to on the Hugging Face Hub." )
UpperCAmelCase_ : Dict = parser.parse_args()
return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local." )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/" )[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename ).group(1 )
        sample_count = int(sample_count )
        num_samples += sample_count
    return num_samples
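# count_samples assumes every shard filename ends with its sample count, e.g.
# "train-00003-52000.tfrecord" -> 52000; the filename below is illustrative, not a real shard.
assert re.search(r"-\d+-(\d+)\.tfrecord", "train-00003-52000.tfrecord" ).group(1 ) == "52000"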
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None ):
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(dataset ) )
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size )
    dataset = dataset.batch(batch_size, drop_remainder=True )
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO )
    dataset = dataset.prefetch(AUTO )
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0" )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(args.tokenizer )
UpperCAmelCase_ : Optional[Any] = AutoConfig.from_pretrained(args.pretrained_model_config )
UpperCAmelCase_ : Any = tokenizer.vocab_size
UpperCAmelCase_ : Dict = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord" ) )
if not training_records:
raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" )
UpperCAmelCase_ : Union[str, Any] = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord" ) )
if not eval_records:
raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" )
UpperCAmelCase_ : Union[str, Any] = count_samples(__lowerCamelCase )
UpperCAmelCase_ : Any = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
UpperCAmelCase_ : Any = steps_per_epoch * args.num_epochs
with strategy.scope():
UpperCAmelCase_ : Dict = TFAutoModelForMaskedLM.from_config(__lowerCamelCase )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
UpperCAmelCase_ , UpperCAmelCase_ : int = create_optimizer(
num_train_steps=__lowerCamelCase, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__lowerCamelCase, metrics=["accuracy"] )
def decode_fn(__lowerCamelCase ):
UpperCAmelCase_ : Dict = {
"input_ids": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
"attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__lowerCamelCase, __lowerCamelCase )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
UpperCAmelCase_ : Optional[int] = DataCollatorForLanguageModeling(
tokenizer=__lowerCamelCase, mlm_probability=args.mlm_probability, mlm=__lowerCamelCase, return_tensors="tf" )
def mask_with_collator(__lowerCamelCase ):
# TF really needs an isin() function
UpperCAmelCase_ : List[Any] = (
~tf.cast(batch["attention_mask"], tf.bool )
| (batch["input_ids"] == tokenizer.cls_token_id)
| (batch["input_ids"] == tokenizer.sep_token_id)
)
UpperCAmelCase_ , UpperCAmelCase_ : Dict = data_collator.tf_mask_tokens(
batch["input_ids"], vocab_size=len(__lowerCamelCase ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=__lowerCamelCase, )
return batch
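    # Sketch of a generic "isin"-style mask for arbitrary special ids (the ids below are
    # hypothetical); the chained `|` comparisons above are the special case for CLS/SEP.
    def _tf_isin_sketch(ids, special_ids=(0, 101, 102)):
        # Compare every token id against every special id, then OR over the special-id axis.
        return tf.reduce_any(
            tf.equal(ids[..., None], tf.constant(special_ids, dtype=ids.dtype)), axis=-1
        )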
UpperCAmelCase_ : List[str] = args.per_replica_batch_size * strategy.num_replicas_in_sync
UpperCAmelCase_ : List[str] = prepare_dataset(
__lowerCamelCase, decode_fn=__lowerCamelCase, mask_fn=__lowerCamelCase, batch_size=__lowerCamelCase, shuffle=__lowerCamelCase, shuffle_buffer_size=args.shuffle_buffer_size, )
UpperCAmelCase_ : int = prepare_dataset(
__lowerCamelCase, decode_fn=__lowerCamelCase, mask_fn=__lowerCamelCase, batch_size=__lowerCamelCase, shuffle=__lowerCamelCase, )
UpperCAmelCase_ : str = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=__lowerCamelCase ) )
model.fit(
__lowerCamelCase, validation_data=__lowerCamelCase, epochs=args.num_epochs, callbacks=__lowerCamelCase, )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 61 |
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
a : str = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
a : int = """main"""
# Default branch name
a : Any = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
a : str = """aaaaaaa"""
# This commit does not exist, so we should 404.
a : int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
a : Any = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def __lowerCamelCase ( ) -> List[str]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def __lowerCamelCase ( ) -> Optional[int]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> List[Any]:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class UpperCamelCase_ ( unittest.TestCase ):
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def _lowercase( self , mock_stdout ) -> Tuple:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def _lowercase( self , mock_stdout ) -> Dict:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def _lowercase( self , mock_stdout ) -> Union[str, Any]:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def _lowercase( self ) -> Optional[int]:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class UpperCamelCase_ ( __magic_name__ ):
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_tf
def _lowercase( self ) -> int:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class UpperCamelCase_ ( __magic_name__ ):
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_flax
def _lowercase( self ) -> Any:
# Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
        self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )
        class DummyModel(FlaxBertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) , [] )
| 265 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCAmelCase : List[str] = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : Union[str, Any] = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
__UpperCAmelCase : Any = {
"gpt-neox-20b": 2048,
}
class UpperCAmelCase_ ( PreTrainedTokenizerFast):
'''simple docstring'''
__UpperCamelCase : Tuple = VOCAB_FILES_NAMES
__UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : int = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation ):
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
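# Truncation behaviour of _build_conversation_input_ids in isolation (toy ids and a
# hypothetical max length; the helper name is illustrative): only the most recent
# `model_max_length` tokens of the running conversation survive.
def _truncate_sketch(input_ids, model_max_length=2048):
    return input_ids[-model_max_length:] if len(input_ids) > model_max_length else input_ids

assert _truncate_sketch(list(range(5)), model_max_length=3) == [2, 3, 4]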
| 315 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    """simple docstring"""
    parser = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''diffusers-cli command helpers''' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
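# Example invocation (assuming the package installs this module as the `diffusers-cli`
# console script; the command name is taken from the usage string above):
#
#   $ diffusers-cli env
#
# argparse resolves `env` to EnvironmentCommand via register_subcommand, `args.func(args)`
# builds the command object, and `service.run()` prints the environment report.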
| 315 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
    def check_results_dict_not_empty( self :List[str] , results :str ):
        '''simple docstring'''
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : str = "sshleifer/tiny-gpt2"
A_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=snake_case , multi_process=snake_case , )
A_ : str = TensorFlowBenchmark(snake_case )
A_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : Dict = "sgugger/tiny-distilbert-classification"
A_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , only_pretrain_model=snake_case , )
A_ : List[str] = TensorFlowBenchmark(snake_case )
A_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : Union[str, Any] = "sshleifer/tiny-gpt2"
A_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , )
A_ : Any = TensorFlowBenchmark(snake_case )
A_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Dict = "sshleifer/tiny-gpt2"
A_ : Optional[Any] = AutoConfig.from_pretrained(snake_case )
A_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=snake_case , multi_process=snake_case , )
A_ : List[str] = TensorFlowBenchmark(snake_case , [config] )
A_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : Optional[Any] = "sshleifer/tiny-gpt2"
A_ : Any = AutoConfig.from_pretrained(snake_case )
A_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , )
A_ : List[Any] = TensorFlowBenchmark(snake_case , [config] )
A_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Optional[int] = "sshleifer/tiny-gpt2"
A_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , )
A_ : Optional[int] = TensorFlowBenchmark(snake_case )
A_ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : int = "sshleifer/tiny-gpt2"
A_ : Union[str, Any] = AutoConfig.from_pretrained(snake_case )
A_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , )
A_ : Any = TensorFlowBenchmark(snake_case , [config] )
A_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : List[Any] = "patrickvonplaten/t5-tiny-random"
A_ : Tuple = AutoConfig.from_pretrained(snake_case )
A_ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , )
A_ : Union[str, Any] = TensorFlowBenchmark(snake_case , configs=[config] )
A_ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : int = "sshleifer/tiny-gpt2"
A_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , use_xla=snake_case , multi_process=snake_case , )
A_ : List[str] = TensorFlowBenchmark(snake_case )
A_ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=snake_case , save_to_csv=snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(snake_case , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(snake_case , "inf_mem.csv" ) , env_info_csv_file=os.path.join(snake_case , "env.csv" ) , multi_process=snake_case , )
A_ : Any = TensorFlowBenchmark(snake_case )
benchmark.run()
self.assertTrue(Path(os.path.join(snake_case , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(snake_case , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(snake_case , "env.csv" ) ).exists() )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : Dict = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(snake_case :Dict ):
self.assertTrue(hasattr(snake_case , "sequential" ) )
self.assertTrue(hasattr(snake_case , "cumulative" ) )
self.assertTrue(hasattr(snake_case , "current" ) )
self.assertTrue(hasattr(snake_case , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(snake_case , "log.txt" ) , log_print=snake_case , trace_memory_line_by_line=snake_case , eager_mode=snake_case , multi_process=snake_case , )
A_ : int = TensorFlowBenchmark(snake_case )
A_ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(snake_case , "log.txt" ) ).exists() )
| 300 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( SchedulerCommonTest ):
"""simple docstring"""
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config( self :Union[str, Any] , **kwargs ):
        '''simple docstring'''
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs )
        return config
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case , beta_end=snake_case )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
self.check_over_configs(thresholding=snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Tuple = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : List[str] = scheduler_class(**snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : int = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : int = scheduler_class(**snake_case )
A_ : Tuple = len(snake_case )
A_ : List[str] = self.dummy_model()
A_ : Optional[Any] = self.dummy_sample_deter
A_ : List[str] = torch.manual_seed(0 )
for t in reversed(range(snake_case ) ):
# 1. predict noise residual
A_ : Tuple = model(snake_case , snake_case )
# 2. predict previous mean of sample x_t-1
A_ : Dict = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A_ : Optional[int] = pred_prev_sample
A_ : Tuple = torch.sum(torch.abs(snake_case ) )
A_ : str = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Optional[int] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config(prediction_type="v_prediction" )
A_ : List[str] = scheduler_class(**snake_case )
A_ : int = len(snake_case )
A_ : Dict = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Any = torch.manual_seed(0 )
for t in reversed(range(snake_case ) ):
# 1. predict noise residual
A_ : Optional[int] = model(snake_case , snake_case )
# 2. predict previous mean of sample x_t-1
A_ : Tuple = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A_ : List[str] = pred_prev_sample
A_ : Optional[Any] = torch.sum(torch.abs(snake_case ) )
A_ : List[str] = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : str = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Dict = scheduler_class(**snake_case )
A_ : Optional[int] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=snake_case )
A_ : Optional[int] = scheduler.timesteps
for i, timestep in enumerate(snake_case ):
if i == len(snake_case ) - 1:
A_ : str = -1
else:
A_ : List[str] = timesteps[i + 1]
A_ : Optional[int] = scheduler.previous_timestep(snake_case )
A_ : List[str] = prev_t.item()
self.assertEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**snake_case )
A_ : List[str] = [100, 87, 50, 51, 0]
with self.assertRaises(snake_case , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Any = self.scheduler_classes[0]
A_ : Union[str, Any] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**snake_case )
A_ : Union[str, Any] = [100, 87, 50, 1, 0]
A_ : Optional[int] = len(snake_case )
with self.assertRaises(snake_case , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=snake_case , timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**snake_case )
A_ : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            snake_case , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=snake_case )
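# Where the `_get_variance` constants asserted earlier in this file come from: for
# "fixed_small", DDPM uses the posterior variance
#   beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t.
# A self-contained sketch (the scheduler's internal clamping aside) that reproduces ~0.00979
# at t=487 for the linear schedule used above; the function name is illustrative only.
def _posterior_variance_sketch(t, num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02):
    betas = torch.linspace(beta_start, beta_end, num_train_timesteps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_prod_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return (1 - alpha_prod_prev) / (1 - alphas_cumprod[t]) * betas[t]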
| 300 | 1 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open( *args : Any , **kwargs : Any):
            """simple docstring"""
            pass
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
UpperCAmelCase : int = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline( self : Dict , model : int , tokenizer : int , processor : Optional[int]):
        """simple docstring"""
        dqa_pipeline = pipeline(
            """document-question-answering""" , model=model , tokenizer=tokenizer , image_processor=processor)
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image) , None , """""")))
        question = """What is the placebo?"""
        examples = [
            {
                """image""": load_image(image),
                """question""": question,
            },
            {
                """image""": image,
                """question""": question,
            },
            {
                """image""": image,
                """question""": question,
                """word_boxes""": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test( self : List[str] , dqa_pipeline : Any , examples : int):
        """simple docstring"""
        outputs = dqa_pipeline(examples , top_k=2)
        self.assertEqual(
            outputs , [
                [
                    {"""score""": ANY(float), """answer""": ANY(str), """start""": ANY(int), """end""": ANY(int)},
                    {"""score""": ANY(float), """answer""": ANY(str), """start""": ANY(int), """end""": ANY(int)},
                ]
            ]
            * 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""")
lowercase_ = INVOICE_URL
lowercase_ = """How many cats are there?"""
lowercase_ = [
{"""score""": 0.0_001, """answer""": """oy 2312/2019""", """start""": 3_8, """end""": 3_9},
{"""score""": 0.0_001, """answer""": """oy 2312/2019 DUE""", """start""": 3_8, """end""": 4_0},
]
lowercase_ = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2)
self.assertEqual(nested_simplify(lowerCAmelCase_ , decimals=4) , lowerCAmelCase_)
lowercase_ = dqa_pipeline({"""image""": image, """question""": question} , top_k=2)
self.assertEqual(nested_simplify(lowerCAmelCase_ , decimals=4) , lowerCAmelCase_)
        # Tesseract detects no text in this image, so layoutlmv2 should fail,
        # most likely by returning an empty answer.
lowercase_ = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowercase_ = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2)
self.assertEqual(lowerCAmelCase_ , [])
        # We can optionally pass the words and bounding boxes directly
lowercase_ = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowercase_ = []
lowercase_ = []
lowercase_ = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , words=lowerCAmelCase_ , boxes=lowerCAmelCase_ , top_k=2)
self.assertEqual(lowerCAmelCase_ , [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
lowercase_ = INVOICE_URL
lowercase_ = """What is the invoice number?"""
lowercase_ = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
lowercase_ = dqa_pipeline({"""image""": image, """question""": question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
lowercase_ = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
[
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=5_0 , )
lowercase_ = INVOICE_URL
lowercase_ = """What is the invoice number?"""
lowercase_ = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
lowercase_ = dqa_pipeline({"""image""": image, """question""": question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
lowercase_ = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
[
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=lowerCAmelCase_)
lowercase_ = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=lowerCAmelCase_ , revision="""3dc6de3""" , )
lowercase_ = INVOICE_URL
lowercase_ = """What is the invoice number?"""
lowercase_ = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
] , )
lowercase_ = dqa_pipeline({"""image""": image, """question""": question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
] , )
lowercase_ = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
[
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
]
]
* 2 , )
lowercase_ = list(zip(*apply_tesseract(load_image(lowerCAmelCase_) , lowerCAmelCase_ , """""")))
# This model should also work if `image` is set to None
lowercase_ = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=lowerCAmelCase_)
lowercase_ = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=lowerCAmelCase_ , revision="""3dc6de3""" , max_seq_len=5_0 , )
lowercase_ = INVOICE_URL
lowercase_ = """What is the invoice number?"""
lowercase_ = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
lowercase_ = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
[
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
]
]
* 2 , )
lowercase_ = list(zip(*apply_tesseract(load_image(lowerCAmelCase_) , lowerCAmelCase_ , """""")))
# This model should also work if `image` is set to None
lowercase_ = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
@slow
@require_torch
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""") , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
lowercase_ = INVOICE_URL
lowercase_ = """What is the invoice number?"""
lowercase_ = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2)
self.assertEqual(nested_simplify(lowerCAmelCase_ , decimals=4) , [{"""answer""": """us-001"""}])
@require_tf
@unittest.skip("""Document question answering not implemented in TF""")
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
pass
| 313 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
    def __init__( self : Any , lowerCAmelCase_ : int = 6):
        """simple docstring"""
        self.front = None
        self.rear = None
        self.create_linked_list(lowerCAmelCase_)
    def create_linked_list( self : List[str] , lowerCAmelCase_ : int):
        """simple docstring"""
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , lowerCAmelCase_):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty( self : Union[str, Any]):
"""simple docstring"""
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
    def first( self : Optional[Any]):
"""simple docstring"""
self.check_can_perform_operation()
return self.front.data if self.front else None
    def enqueue( self : int , data : Any):
        """simple docstring"""
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue( self : str):
        """simple docstring"""
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation( self : Any):
"""simple docstring"""
if self.is_empty():
raise Exception("""Empty Queue""")
    def check_is_full( self : Tuple):
"""simple docstring"""
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""")
class Node:
    def __init__( self : List[str]):
        """simple docstring"""
        self.data = None
        self.next = None
        self.prev = None
if __name__ == "__main__":
import doctest
doctest.testmod()
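    # Minimal smoke test beyond the doctests (SCREAMING_SNAKE_CASE__ is the circular-queue
    # class defined above): a capacity-3 ring behaves FIFO and frees slots on dequeue.
    queue = SCREAMING_SNAKE_CASE__(3)
    for item in ("a", "b", "c"):
        queue.enqueue(item)
    assert [queue.dequeue() for _ in range(3)] == ["a", "b", "c"]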
| 313 | 1 |
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
_lowerCAmelCase = True
from torch.cuda.amp import autocast
_lowerCAmelCase = logging.getLogger(__name__)
def list_field(default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class ModelArguments:
'''simple docstring'''
A = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
A = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
A = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
A = field(
default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
A = field(
default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
A = field(
default=0.1 , metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
} , )
A = field(
default=0.1 , metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."} , )
A = field(
default=0.0_5 , metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
} , )
A = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class DataTrainingArguments:
'''simple docstring'''
A = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
A = field(
default="train+validation" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to \'train\'"
} , )
A = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
A = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
A = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
A = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
} , )
A = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "\'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class DataCollatorCTCWithPadding:
'''simple docstring'''
A = 4_2
A = True
A = None
A = None
A = None
A = None
    def __call__(self , features ) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{'input_values': feature['input_values']} for feature in features]
        label_features = [{'input_ids': feature['labels']} for feature in features]
        batch = self.processor.pad(
            input_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        labels_batch = self.processor.pad(
            labels=label_features , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="pt" , )
        # replace padding with -100 so these positions are ignored by the CTC loss
        labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
        batch['labels'] = labels
        return batch
class CTCTrainer( Trainer ):
'''simple docstring'''
    def training_step(self , model , inputs ) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['labels'] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']" )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        return loss.detach()
def __lowerCAmelCase ( ):
__UpperCamelCase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase : List[Any] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__UpperCamelCase : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCamelCase : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__UpperCamelCase : Any = datasets.load_dataset(
"common_voice" , data_args.dataset_config_name , split=data_args.train_split_name )
__UpperCamelCase : Dict = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test" )
# Create and save tokenizer
__UpperCamelCase : Optional[Any] = F"[{''.join(data_args.chars_to_ignore )}]"
def remove_special_characters(snake_case__ ):
__UpperCamelCase : Union[str, Any] = re.sub(_UpperCAmelCase , "" , batch["sentence"] ).lower() + ' '
return batch
__UpperCamelCase : Dict = train_dataset.map(_UpperCAmelCase , remove_columns=["sentence"] )
__UpperCamelCase : Union[str, Any] = eval_dataset.map(_UpperCAmelCase , remove_columns=["sentence"] )
def extract_all_chars(snake_case__ ):
__UpperCamelCase : Tuple = ' '.join(batch["text"] )
__UpperCamelCase : int = list(set(_UpperCAmelCase ) )
return {"vocab": [vocab], "all_text": [all_text]}
__UpperCamelCase : List[str] = train_dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , batch_size=-1 , keep_in_memory=_UpperCAmelCase , remove_columns=train_dataset.column_names , )
__UpperCamelCase : Union[str, Any] = train_dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , batch_size=-1 , keep_in_memory=_UpperCAmelCase , remove_columns=eval_dataset.column_names , )
__UpperCamelCase : List[Any] = list(set(vocab_train["vocab"][0] ) | set(vocab_test["vocab"][0] ) )
__UpperCamelCase : str = {v: k for k, v in enumerate(_UpperCAmelCase )}
__UpperCamelCase : int = vocab_dict[' ']
del vocab_dict[" "]
__UpperCamelCase : Union[str, Any] = len(_UpperCAmelCase )
__UpperCamelCase : List[Any] = len(_UpperCAmelCase )
with open("vocab.json" , "w" ) as vocab_file:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0.0 , do_normalize=True , return_attention_mask=True )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    model = Wav2Vec2ForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
__UpperCamelCase : Any = min(len(_UpperCAmelCase ) , data_args.max_train_samples )
__UpperCamelCase : Dict = train_dataset.select(range(_UpperCAmelCase ) )
if data_args.max_val_samples is not None:
__UpperCamelCase : int = eval_dataset.select(range(data_args.max_val_samples ) )
__UpperCamelCase : List[str] = torchaudio.transforms.Resample(48_000 , 16_000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(snake_case__ ):
__UpperCamelCase : Union[str, Any] = torchaudio.load(batch["path"] )
__UpperCamelCase : Optional[int] = resampler(_UpperCAmelCase ).squeeze().numpy()
__UpperCamelCase : int = 16_000
__UpperCamelCase : Dict = batch['text']
return batch
__UpperCamelCase : Tuple = train_dataset.map(
_UpperCAmelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
__UpperCamelCase : int = eval_dataset.map(
_UpperCAmelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(snake_case__ ):
# check that all files have the correct sampling rate
assert (
len(set(batch["sampling_rate"] ) ) == 1
), F"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
__UpperCamelCase : Union[str, Any] = processor(
audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0] )
batch.update(_UpperCAmelCase )
return batch
__UpperCamelCase : Dict = train_dataset.map(
_UpperCAmelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , )
__UpperCamelCase : Tuple = eval_dataset.map(
_UpperCAmelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , )
# Metric
__UpperCamelCase : Optional[Any] = datasets.load_metric("wer" )
def compute_metrics(snake_case__ ):
__UpperCamelCase : List[Any] = pred.predictions
__UpperCamelCase : Dict = np.argmax(_UpperCAmelCase , axis=-1 )
__UpperCamelCase : Dict = processor.tokenizer.pad_token_id
__UpperCamelCase : Dict = processor.batch_decode(_UpperCAmelCase )
# we do not want to group tokens when computing the metrics
__UpperCamelCase : int = processor.batch_decode(pred.label_ids , group_tokens=_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = wer_metric.compute(predictions=_UpperCAmelCase , references=_UpperCAmelCase )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__UpperCamelCase : List[str] = DataCollatorCTCWithPadding(processor=_UpperCAmelCase , padding=_UpperCAmelCase )
# Initialize our Trainer
__UpperCamelCase : List[Any] = CTCTrainer(
model=_UpperCAmelCase , data_collator=_UpperCAmelCase , args=_UpperCAmelCase , compute_metrics=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__UpperCamelCase : Optional[Any] = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__UpperCamelCase : str = model_args.model_name_or_path
else:
__UpperCamelCase : Optional[int] = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__UpperCamelCase : List[Any] = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model()
__UpperCamelCase : Optional[int] = train_result.metrics
__UpperCamelCase : str = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase )
)
__UpperCamelCase : str = min(_UpperCAmelCase , len(_UpperCAmelCase ) )
trainer.log_metrics("train" , _UpperCAmelCase )
trainer.save_metrics("train" , _UpperCAmelCase )
trainer.save_state()
# Evaluation
__UpperCamelCase : List[str] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__UpperCamelCase : List[Any] = trainer.evaluate()
__UpperCamelCase : str = data_args.max_val_samples if data_args.max_val_samples is not None else len(_UpperCAmelCase )
__UpperCamelCase : Dict = min(_UpperCAmelCase , len(_UpperCAmelCase ) )
trainer.log_metrics("eval" , _UpperCAmelCase )
trainer.save_metrics("eval" , _UpperCAmelCase )
return results
if __name__ == "__main__":
main()
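# A minimal standalone sketch (added illustration, not part of the original script) of
# the resampling step used above: Common Voice ships 48 kHz clips, while Wav2Vec2
# expects 16 kHz input. The file path is hypothetical.
#
#   import torchaudio
#   waveform, _ = torchaudio.load("clip.mp3")
#   resampler = torchaudio.transforms.Resample(48_000, 16_000)
#   speech_array = resampler(waveform).squeeze().numpy()  # 1-D array at 16 kHz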
| 298 |
"""simple docstring"""
import math
def solution(n: int = 1_00) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers (Project Euler problem 6)."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
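# Quick sanity check (added illustration): for n = 10 the sum of squares is 385 and the
# square of the sum is 55**2 = 3025, so the difference is 2640.
assert solution(10) == 2640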
| 255 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    """Agent tool wrapping CLIPSeg for text-conditioned image segmentation."""

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0  # negative logits -> background
        array[array > 0] = 1  # positive logits -> mask
        return Image.fromarray((array * 255).astype(np.uint8))
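# Usage sketch (added illustration; kept as a comment since it downloads the checkpoint).
# PipelineTool runs setup/encode/forward/decode when the tool is called; the image path
# is hypothetical.
#
#   from PIL import Image
#   tool = ImageSegmentationTool()
#   mask = tool(Image.open("cat.png"), "cat")  # returns the segmentation mask as a PIL image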
| 350 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    """Configuration class for the Decision Transformer model."""

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
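# Usage sketch (added illustration, standard transformers pattern assumed):
#
#   from transformers import DecisionTransformerConfig, DecisionTransformerModel
#
#   config = DecisionTransformerConfig(state_dim=17, act_dim=4)
#   model = DecisionTransformerModel(config)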
| 218 | 0 |
'''simple docstring'''
UNIT_SYMBOL = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'''m''': 0,
'''km''': 3,
'''Mm''': 6,
'''Gm''': 9,
'''Tm''': 12,
'''Pm''': 15,
'''Em''': 18,
'''Zm''': 21,
'''Ym''': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    '''Convert between metric length units via their powers of ten.'''
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
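# Worked example (added illustration): kilometer -> meter is exponent 3 - 0 = 3,
# so the value is scaled by 10**3.
assert length_conversion(1, "kilometer", "meter") == 1000
assert length_conversion(1, "meter", "kilometer") == 0.001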
| 1 |
'''simple docstring'''
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve: return all primes up to ``n``."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
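# Cross-check (added illustration): for small bounds the segmented sieve should agree
# with a naive primality test.
def _is_prime(k: int) -> bool:
    return k > 1 and all(k % d for d in range(2, int(math.sqrt(k)) + 1))

assert sieve(50) == [k for k in range(2, 51) if _is_prime(k)]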
| 223 | 0 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
    help=(
        "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
        " Base. Use 768 for Stable Diffusion v2."
    ),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
lowercase_ = parser.parse_args()
lowercase_ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
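# Example invocation (added illustration; the paths are hypothetical, the flags are the
# ones defined above, and the script filename is an assumption):
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --scheduler_type pndm \
#       --extract_ema \
#       --dump_path ./sd15-diffusers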
| 282 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
"""simple docstring"""
def __init__( self : Any,lowercase_ : Optional[int],lowercase_ : Optional[int]=1_3,lowercase_ : int=7,lowercase_ : List[str]=True,lowercase_ : str=True,lowercase_ : List[str]=True,lowercase_ : Optional[Any]=True,lowercase_ : Dict=9_9,lowercase_ : Dict=2_4,lowercase_ : Union[str, Any]=2,lowercase_ : str=6,lowercase_ : Dict=3_7,lowercase_ : Optional[Any]="gelu",lowercase_ : Any=0.1,lowercase_ : Any=0.1,lowercase_ : Any=5_1_2,lowercase_ : Dict=1_6,lowercase_ : List[str]=2,lowercase_ : Dict=0.02,lowercase_ : Any=3,lowercase_ : Dict=None,lowercase_ : List[str]=1_0_0_0,)-> Optional[Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = scope
A__ = range_bbox
def snake_case__ ( self : List[Any] )-> Tuple:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
A__ = ids_tensor([self.batch_size, self.seq_length, 4],self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A__ = bbox[i, j, 3]
A__ = bbox[i, j, 1]
A__ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
A__ = bbox[i, j, 2]
A__ = bbox[i, j, 0]
A__ = t
A__ = None
if self.use_input_mask:
A__ = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
A__ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case__ ( self : Dict )-> int:
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,)
def snake_case__ ( self : Optional[Any],lowercase_ : Tuple,lowercase_ : str,lowercase_ : Optional[int],lowercase_ : Optional[Any],lowercase_ : str,lowercase_ : List[str],lowercase_ : Tuple,)-> Optional[Any]:
'''simple docstring'''
A__ = LiltModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,bbox=lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_ )
A__ = model(lowercase_,bbox=lowercase_,token_type_ids=lowercase_ )
A__ = model(lowercase_,bbox=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape,(self.batch_size, self.hidden_size) )
def snake_case__ ( self : Any,lowercase_ : Dict,lowercase_ : List[Any],lowercase_ : List[str],lowercase_ : Optional[int],lowercase_ : Optional[int],lowercase_ : Optional[int],lowercase_ : List[Any],)-> List[str]:
'''simple docstring'''
A__ = self.num_labels
A__ = LiltForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(
lowercase_,bbox=lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : int,lowercase_ : Union[str, Any],lowercase_ : int,lowercase_ : Tuple,lowercase_ : Any,lowercase_ : Optional[int],lowercase_ : Tuple,lowercase_ : List[str],)-> Any:
'''simple docstring'''
A__ = LiltForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(
lowercase_,bbox=lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,start_positions=lowercase_,end_positions=lowercase_,)
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def snake_case__ ( self : Optional[int] )-> Tuple:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
def snake_case__ ( self : List[str],lowercase_ : int,lowercase_ : List[str],lowercase_ : str,lowercase_ : Optional[Any],lowercase_ : Optional[Any] )-> Any:
'''simple docstring'''
return True
def snake_case__ ( self : int )-> Tuple:
'''simple docstring'''
A__ = LiltModelTester(self )
A__ = ConfigTester(self,config_class=lowercase_,hidden_size=3_7 )
def snake_case__ ( self : List[Any] )-> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : Dict )-> Union[str, Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ = type
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case__ ( self : str )-> Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
def snake_case__ ( self : List[Any] )-> Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
@slow
def snake_case__ ( self : List[Any] )-> int:
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = LiltModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_torch
@slow
class A ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : List[Any] )-> Dict:
'''simple docstring'''
A__ = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(lowercase_ )
A__ = torch.tensor([[1, 2]],device=lowercase_ )
A__ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]],device=lowercase_ )
# forward pass
with torch.no_grad():
A__ = model(input_ids=lowercase_,bbox=lowercase_ )
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
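# Inference sketch mirroring the integration test above (kept as a comment since it
# downloads weights; the zeroed bounding boxes are purely illustrative):
#
#   from transformers import AutoTokenizer, LiltModel
#   import torch
#
#   tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
#   model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
#   encoding = tokenizer("Hello world", return_tensors="pt")
#   bbox = torch.zeros((1, encoding.input_ids.shape[1], 4), dtype=torch.long)
#   outputs = model(input_ids=encoding.input_ids, bbox=bbox)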
| 282 | 1 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    """Processor that fuses character, BPE and wordpiece decoding for MGP-STR."""

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        """Fuse char/bpe/wordpiece predictions, keeping the highest-confidence string per sample."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 1_02
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
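# End-to-end sketch of the decode path above (kept as a comment since it downloads
# weights; checkpoint name taken from the MGP-STR model cards, image path hypothetical):
#
#   from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
#   from PIL import Image
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=Image.open("word.png"), return_tensors="pt").pixel_values
#   text = processor.batch_decode(model(pixel_values).logits)["generated_text"]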
| 237 |
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = (DPMSolverSDEScheduler,)
__lowercase = 10
def UpperCAmelCase_ ( self :List[Any] , **lowercase_ :Optional[int] )-> str:
A__ = {
"num_train_timesteps": 11_00,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"noise_sampler_seed": 0,
}
config.update(**lowercase_ )
return config
def UpperCAmelCase_ ( self :int )-> Dict:
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCAmelCase_ ( self :List[Any] )-> Tuple:
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCAmelCase_ ( self :Any )-> Optional[Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCAmelCase_ ( self :List[Any] )-> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCAmelCase_ ( self :List[str] )-> Union[str, Any]:
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(self.num_inference_steps )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter * scheduler.init_noise_sigma
A__ = sample.to(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
A__ = scheduler.scale_model_input(lowercase_ , lowercase_ )
A__ = model(lowercase_ , lowercase_ )
A__ = scheduler.step(lowercase_ , lowercase_ , lowercase_ )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def UpperCAmelCase_ ( self :Optional[int] )-> Dict:
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(prediction_type="v_prediction" )
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(self.num_inference_steps )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter * scheduler.init_noise_sigma
A__ = sample.to(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
A__ = scheduler.scale_model_input(lowercase_ , lowercase_ )
A__ = model(lowercase_ , lowercase_ )
A__ = scheduler.step(lowercase_ , lowercase_ , lowercase_ )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3
def UpperCAmelCase_ ( self :Optional[int] )-> List[str]:
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(self.num_inference_steps , device=lowercase_ )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter.to(lowercase_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
A__ = scheduler.scale_model_input(lowercase_ , lowercase_ )
A__ = model(lowercase_ , lowercase_ )
A__ = scheduler.step(lowercase_ , lowercase_ , lowercase_ )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def UpperCAmelCase_ ( self :Tuple )-> Dict:
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**lowercase_ , use_karras_sigmas=lowercase_ )
scheduler.set_timesteps(self.num_inference_steps , device=lowercase_ )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter.to(lowercase_ ) * scheduler.init_noise_sigma
A__ = sample.to(lowercase_ )
for t in scheduler.timesteps:
A__ = scheduler.scale_model_input(lowercase_ , lowercase_ )
A__ = model(lowercase_ , lowercase_ )
A__ = scheduler.step(lowercase_ , lowercase_ , lowercase_ )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
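# Usage sketch outside the test suite (kept as a comment since it downloads weights;
# the checkpoint name is illustrative). The scheduler can be swapped into an existing
# pipeline via from_config:
#
#   from diffusers import DiffusionPipeline, DPMSolverSDEScheduler
#
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)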
| 237 | 1 |
"""simple docstring"""
def exchange_sort(numbers: list[int]) -> list[int]:
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
__UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
__UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(exchange_sort(unsorted))
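# Added illustration: exchange sort compares every pair (i, j) with j > i, so it always
# performs O(n^2) comparisons regardless of input order.
assert exchange_sort([3, 1, 2]) == [1, 2, 3]
assert exchange_sort([]) == []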
| 38 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Coefficients are given in order of degree, smallest first."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.')
        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
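# Usage sketch (added illustration) exercising the operations above:
p = Polynomial(2, [0, 0, 1])  # x^2
q = Polynomial(1, [1, 1])     # 1 + x
assert str(p + q) == "1x^2 + 1x + 1"
assert p.evaluate(3) == 9
assert str(p.derivative()) == "2x"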
| 38 | 1 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _snake_case ( _snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = CodeGenTokenizer
SCREAMING_SNAKE_CASE__ = CodeGenTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = {'add_prefix_space': True}
SCREAMING_SNAKE_CASE__ = False
def SCREAMING_SNAKE_CASE__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a :Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
a :Any = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
a :Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a :Dict = {'''unk_token''': '''<unk>'''}
a :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a :Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self , **_lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , **_lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :Optional[Any] = '''lower newer'''
a :Union[str, Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a :Dict = '''lower newer'''
a :List[Any] = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a :int = tokenizer.tokenize(_lowerCamelCase , add_prefix_space=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
a :Any = tokens + [tokenizer.unk_token]
a :int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
if not self.test_rust_tokenizer:
return
a :List[Any] = self.get_tokenizer()
a :Optional[Any] = self.get_rust_tokenizer(add_prefix_space=_lowerCamelCase )
a :Tuple = '''lower newer'''
# Testing tokenization
a :Optional[Any] = tokenizer.tokenize(_lowerCamelCase , add_prefix_space=_lowerCamelCase )
a :int = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
# Testing conversion to ids without special tokens
a :List[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase , add_prefix_space=_lowerCamelCase )
a :Any = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
# Testing conversion to ids with special tokens
a :Any = self.get_rust_tokenizer(add_prefix_space=_lowerCamelCase )
a :str = tokenizer.encode(_lowerCamelCase , add_prefix_space=_lowerCamelCase )
a :List[str] = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
# Testing the unknown token
a :List[Any] = tokens + [rust_tokenizer.unk_token]
a :List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a :Optional[Any] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
# Simple input
a :Optional[Any] = '''This is a simple input'''
a :List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
a :str = ('''This is a simple input''', '''This is a pair''')
a :Dict = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_lowerCamelCase , tokenizer_r.encode , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' )
# Simple input
self.assertRaises(_lowerCamelCase , tokenizer_r.encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
_lowerCamelCase , tokenizer_r.batch_encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(_lowerCamelCase , tokenizer_r.encode , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' )
# Pair input
self.assertRaises(_lowerCamelCase , tokenizer_r.encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
_lowerCamelCase , tokenizer_r.batch_encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' , )
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
a :List[Any] = '''This is a simple input'''
a :Optional[int] = ['''This is a simple input looooooooong''', '''This is a simple input''']
a :Dict = ('''This is a simple input''', '''This is a pair''')
a :int = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
a :Optional[Any] = tokenizer.pad_token_id
a :List[Any] = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
a :Dict = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , truncate=_lowerCamelCase , return_tensors='''np''' )
a :Tuple = tokenizer(*_lowerCamelCase , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
a :Any = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , truncate=_lowerCamelCase , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[Any] = '''$$$'''
a :Dict = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=_lowerCamelCase , add_bos_token=_lowerCamelCase )
a :Optional[int] = '''This is a simple input'''
a :int = ['''This is a simple input 1''', '''This is a simple input 2''']
a :Union[str, Any] = tokenizer.bos_token_id
a :List[str] = tokenizer(_lowerCamelCase )
a :str = tokenizer(_lowerCamelCase )
self.assertEqual(out_s.input_ids[0] , _lowerCamelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
a :str = tokenizer.decode(out_s.input_ids )
a :Tuple = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _lowerCamelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
a :Optional[int] = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
a :Dict = '''\nif len_a > len_b: result = a\nelse: result = b'''
a :str = tokenizer.encode(_lowerCamelCase )
a :List[str] = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
a :Optional[Any] = tokenizer.decode(_lowerCamelCase , truncate_before_pattern=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
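# Sketch of the truncation feature exercised in the slow test above (kept as a comment
# since it downloads the checkpoint; the prompt and the truncation result are illustrative):
#
#   tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tokenizer.encode("if x:\n    y = 1\n# trailing comment")
#   text = tokenizer.decode(ids, truncate_before_pattern=["^#"])  # drops the comment tail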
| 94 |
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner(self, weights, sample):
        """Return the index of the weight vector closest to the sample
        (squared Euclidean distance)."""
        d_0 = 0.0
        d_1 = 0.0
        for i in range(len(sample)):
            d_0 += math.pow((sample[i] - weights[0][i]), 2)
            d_1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d_0 < d_1 else 1

    def update(self, weights, sample, j, alpha):
        """Move each component of the winning vector ``j`` a fraction ``alpha``
        toward the sample."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # training examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f'Clusters that the test sample belongs to : {winner}')
    print(f'Weights that have been trained : {weights}')
# running the main() function
if __name__ == "__main__":
main()
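# Added illustration of the rules above: the winner is the closer weight vector, and
# update moves each component a fraction alpha toward the sample.
_som = SelfOrganizingMap()
assert _som.get_winner([[0, 0], [1, 1]], [1, 1]) == 1
assert _som.update([[0.0, 0.0], [1.0, 1.0]], [1, 1], 0, 0.5)[0] == [0.5, 0.5]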
| 168 | 0 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = 8
# DPR tok
UpperCamelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
UpperCamelCase = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
UpperCamelCase = os.path.join(lowerCamelCase_ , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
UpperCamelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
UpperCamelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
UpperCamelCase = {"""unk_token""": """<unk>"""}
UpperCamelCase = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
UpperCamelCase = os.path.join(lowerCamelCase_ , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase = os.path.join(lowerCamelCase_ , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = os.path.join(self.tmpdirname , """rag_tokenizer""" )
UpperCamelCase = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
UpperCamelCase = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(lowerCamelCase_ )
rag_tokenizer.save_pretrained(lowerCamelCase_ )
UpperCamelCase = RagTokenizer.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
self.assertIsInstance(new_rag_tokenizer.question_encoder , lowerCamelCase_ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , lowerCamelCase_ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
UpperCamelCase = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
UpperCamelCase = tokenizer(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
UpperCamelCase = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
UpperCamelCase = tokenizer(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
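# Usage sketch grounded in the slow tests above (kept as a comment since it downloads
# the checkpoint):
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   batch = tokenizer(["who got the first nobel prize in physics"], return_tensors="pt")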
| 165 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 2_5_5.0
        config.upsampler = ""
    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name
    return name
def convert_state_dict( orig_state_dict , config ) -> Union[str, Any]:
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split(""".""" )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"""
                ] = val[:dim, :]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"""
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"""
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"""
                ] = val[:dim]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"""
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"""
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , config )] = val
    return orig_state_dict
def convert_swinasr_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ) -> Optional[Any]:
    '''simple docstring'''
    config = get_config(checkpoint_url )
    model = SwinaSRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError("""Missing keys when converting: {}""".format(missing_keys ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"""Unexpected key {key} in state_dict""" )
    # verify values
    url = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if """Jpeg""" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , expected_slice , atol=1e-3 )
    print("""Looks ok!""" )
    url_to_name = {
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
            """swin2SR-classical-sr-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
            """swin2SR-classical-sr-x4-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
            """swin2SR-compressed-sr-x4-48"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
            """swin2SR-lightweight-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
            """swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
        ),
    }
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub(f"""caidas/{model_name}""" )
        processor.push_to_hub(f"""caidas/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
    args = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
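    # Usage sketch (assumption: this file is saved as convert_swin2sr_to_pytorch.py;
    # the script filename and output directory below are illustrative, not from the source):
    #
    #   python convert_swin2sr_to_pytorch.py \
    #       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
    #       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64 \
    #       --push_to_hub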
| 165 | 1 |
def sylvester( number ):
    '''simple docstring'''
    assert isinstance(number , int ), F'''The input value of [n={number}] is not an integer'''
    if number == 1:
        return 2
    elif number < 1:
        msg = F'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 314 |
from ...configuration_utils import PretrainedConfig
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = "tapas"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10] , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , positive_label_weight=10.0 , num_aggregation_labels=0 , aggregation_loss_weight=1.0 , use_answer_as_supervision=None , answer_loss_importance=1.0 , use_normalized_answer_loss=False , huber_loss_delta=None , temperature=1.0 , aggregation_temperature=1.0 , use_gumbel_for_cells=False , use_gumbel_for_aggregation=False , average_approximation_function="ratio" , cell_selection_preference=None , answer_loss_cutoff=None , max_num_rows=64 , max_num_columns=32 , average_logits_per_cell=False , select_one_column=True , allow_empty_column_selection=False , init_cell_selection_weights_to_zero=False , reset_position_index_per_cell=True , disable_per_token_loss=False , aggregation_labels=None , no_aggregation_label_index=None , **kwargs , ) -> str:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
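# Minimal usage sketch (standard PretrainedConfig round-trip; the directory name
# is illustrative):
# config = TapasConfig(num_aggregation_labels=4, answer_loss_cutoff=0.664)
# config.save_pretrained("./tapas-config")
# reloaded = TapasConfig.from_pretrained("./tapas-config")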
| 314 | 1 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=64 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> Optional[int]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config( self ) -> Optional[Any]:
        return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
    def prepare_config_and_inputs( self ) -> List[str]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> Union[str, Any]:
        return MPNetConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_mpnet_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        model = MPNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_mpnet_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        model = MPNetForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_mpnet_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_mpnet_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Tuple:
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_mpnet_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> int:
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    def setUp( self ) -> Union[str, Any]:
        self.model_tester = MPNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MPNetConfig , hidden_size=37 )
    def test_config( self ) -> List[str]:
        self.config_tester.run_common_tests()
    def test_mpnet_model( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs )
    def test_for_sequence_classification( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs )
    def test_for_token_classification( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs )
@require_torch
class MPNetModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_no_head( self ) -> Tuple:
        model = MPNetModel.from_pretrained("microsoft/mpnet-base" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
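# Running these tests (the path follows the usual transformers layout and is an
# assumption about where this file lives):
#   pytest tests/models/mpnet/test_modeling_mpnet.py -v
# The @slow integration test only executes when RUN_SLOW=1 is set in the environment.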
| 47 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config( config_path , display=False ):
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan( device , conf_path=None , ckpt_path=None ):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd , strict=False )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan( x , model ):
    # VQModel.encode returns (quantized latents, embedding loss, info)
    z , _ , _ = model.encode(x )
    print(F'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}' )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str( string , reload=False ):
    module , cls = string.rsplit("." , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config( config ):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate." )
    return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def load_model_from_config( config , sd , gpu=True , eval_mode=True ):
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model( config , ckpt , gpu , eval_mode ):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="cpu" )
        global_step = pl_sd["global_step"]
        print(F'loaded model from global step {global_step}.' )
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=gpu , eval_mode=eval_mode )["model"]
    return model, global_step
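# Minimal usage sketch (checkpoint/config paths are illustrative assumptions):
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# vqgan = load_vqgan(device, conf_path="./model_checkpoints/vqgan_only.yaml",
#                    ckpt_path="./model_checkpoints/vqgan_only.pt")
# x = torch.randn(1, 3, 256, 256, device=device)  # dummy image batch in [-1, 1]
# xrec = reconstruct_with_vqgan(x, vqgan)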
| 47 | 1 |
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def softmax( outputs ) ->Any:
    '''simple docstring'''
    maxes = np.max(outputs ,axis=-1 ,keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 ,keepdims=True )
class PairClassificationPipeline( Pipeline ):
    """simple docstring"""
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs['second_text'] = kwargs['second_text']
        return preprocess_kwargs, {}, {}
    def preprocess( self , text , second_text=None ):
        '''simple docstring'''
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )
    def _forward( self , model_inputs ):
        '''simple docstring'''
        return self.model(**model_inputs )
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 179 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args , **kwargs ):
            '''simple docstring'''
            pass

    def load_image(_ ):
        """simple docstring"""
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline( self , model , tokenizer , processor ):
        '''simple docstring'''
        dqa_pipeline = pipeline(
            'document-question-answering' , model=model , tokenizer=tokenizer , image_processor=processor )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , '' ) ) )
        question = 'What is the placebo?'
        examples = [
            {
                'image': load_image(image ),
                'question': question,
            },
            {
                'image': image,
                'question': question,
            },
            {
                'image': image,
                'question': question,
                'word_boxes': word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test( self , dqa_pipeline , examples ):
        '''simple docstring'''
        outputs = dqa_pipeline(examples , top_k=2 )
        self.assertEqual(
            outputs , [
                [
                    {'score': ANY(float ), 'answer': ANY(str ), 'start': ANY(int ), 'end': ANY(int )},
                    {'score': ANY(float ), 'answer': ANY(str ), 'start': ANY(int ), 'end': ANY(int )},
                ]
            ]
            * 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt( self ):
        '''simple docstring'''
        dqa_pipeline = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
        image = INVOICE_URL
        question = 'How many cats are there?'
        expected_output = [
            {'score': 0.0001, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
            {'score': 0.0001, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
        ]
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )
        outputs = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(outputs , [] )
        # We can optionnally pass directly the words and bounding boxes
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image , question=question , words=words , boxes=boxes , top_k=2 )
        self.assertEqual(outputs , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt( self ):
        '''simple docstring'''
        dqa_pipeline = pipeline(
            'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
        image = INVOICE_URL
        question = 'What is the invoice number?'
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
            ] , )
        outputs = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
            ] , )
        outputs = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
                    {'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
                ],
            ]
            * 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk( self ):
        '''simple docstring'''
        dqa_pipeline = pipeline(
            'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , )
        image = INVOICE_URL
        question = 'What is the invoice number?'
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
                {'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
            ] , )
        outputs = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
                {'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
            ] , )
        outputs = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
                    {'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
                ]
            ]
            * 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm( self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(
            'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=True )
        dqa_pipeline = pipeline(
            'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=tokenizer , revision='3dc6de3' , )
        image = INVOICE_URL
        question = 'What is the invoice number?'
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
            ] , )
        outputs = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
            ] , )
        outputs = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
                    {'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
                ]
            ]
            * 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , '' ) ) )
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
            ] , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk( self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(
            'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=True )
        dqa_pipeline = pipeline(
            'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=tokenizer , revision='3dc6de3' , max_seq_len=50 , )
        image = INVOICE_URL
        question = 'What is the invoice number?'
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
            ] , )
        outputs = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
                    {'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
                ]
            ]
            * 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , '' ) ) )
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
            ] , )
@slow
@require_torch
    def test_large_model_pt_donut( self ):
        '''simple docstring'''
        dqa_pipeline = pipeline(
            'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
        image = INVOICE_URL
        question = 'What is the invoice number?'
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
    def test_small_model_tf( self ):
'''simple docstring'''
pass
| 293 | 0 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptsTest( TestCase ):
    '''simple docstring'''
    def _no_encoding_on_file_open( self, filepath ):
        '''simple docstring'''
        with open(filepath, encoding='utf-8' ) as input_file:
            regexp = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match
    def _no_print_statements( self, filepath ):
        '''simple docstring'''
        with open(filepath, encoding='utf-8' ) as input_file:
            regexp = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open( self ):
        '''simple docstring'''
        dataset_paths = Path('./datasets' )
        dataset_files = list(dataset_paths.absolute().glob('**/*.py' ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )
    def test_no_print_statements( self ):
        '''simple docstring'''
        dataset_paths = Path('./datasets' )
        dataset_files = list(dataset_paths.absolute().glob('**/*.py' ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 356 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_DESCRIPTION = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_KWARGS_DESCRIPTION = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('string', id='sequence' ),
'references': datasets.Value('string', id='sequence' ),
} ), codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'], reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
], )
    def _compute( self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False ):
        '''simple docstring'''
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer )
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions ):
            score = scorer.score(ref, pred )
            if use_aggregator:
                aggregator.add_scores(score )
            else:
                scores.append(score )
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
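# Note on the two output shapes of _compute: with use_aggregator=True (the default),
# each entry is a bootstrap AggregateScore as in the docstring example; with
# use_aggregator=False it is a list of per-pair Score tuples, e.g.
# results["rouge1"][0].fmeasure.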
| 246 | 0 |
"""simple docstring"""
import operator as op
def solve( post_fix )-> Any:
    '''simple docstring'''
    stack = []
    div = lambda x , y : int(x / y )  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
    print("-" * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit():  # if x in digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(stack ) , sep=" | " , )
    return int(stack[0] )
if __name__ == "__main__":
    Postfix = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
| 40 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
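# With the lazy module in place, `import transformers.models.bert` stays cheap;
# a backend-specific submodule is only imported when one of its names is first
# accessed, e.g. (illustrative):
# from transformers.models.bert import BertConfig  # config only, no torch import yet
# from transformers.models.bert import BertModel   # triggers the torch-backed module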
| 40 | 1 |
'''simple docstring'''
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
rotor2 = """FOBHMDKEXQNRAULPGSJVTYICZW"""
rotor3 = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
reflector = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
rotor4 = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
rotor5 = """SGLCPQWZHKXAREONTFBVIYJUDM"""
rotor6 = """HVSICLTYKQUBXDWAJZOMFGPREN"""
rotor7 = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
rotor8 = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
rotor9 = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def _validator( rotpos , rotsel , pb ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel ) )) < 3:
        msg = f'''Please use 3 unique rotors (not {unique_rotsel})'''
        raise Exception(msg )
    # Checks if rotor positions are valid
    rotorpos1 , rotorpos2 , rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc ):
        msg = f'''First rotor position is not within range of 1..26 ({rotorpos1})'''
        raise ValueError(msg )
    if not 0 < rotorpos2 <= len(abc ):
        msg = f'''Second rotor position is not within range of 1..26 ({rotorpos2})'''
        raise ValueError(msg )
    if not 0 < rotorpos3 <= len(abc ):
        msg = f'''Third rotor position is not within range of 1..26 ({rotorpos3})'''
        raise ValueError(msg )
    # Validates string and returns dict
    pbdict = _plugboard(pb )
    return rotpos, rotsel, pbdict
def _plugboard( pbstring ) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring , str ):
        msg = f'''Plugboard setting isn\'t type string ({type(pbstring )})'''
        raise TypeError(msg )
    elif len(pbstring ) % 2 != 0:
        msg = f'''Odd number of symbols ({len(pbstring )})'''
        raise Exception(msg )
    elif pbstring == "":
        return {}
    pbstring = pbstring.replace(""" """ , """""" )
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f'''\'{i}\' not in list of symbols'''
            raise Exception(msg )
        elif i in tmppbl:
            msg = f'''Duplicate symbol ({i})'''
            raise Exception(msg )
        else:
            tmppbl.add(i )
    del tmppbl
    # Created the dictionary
    pb = {}
    for j in range(0 , len(pbstring ) - 1 , 2 ):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
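# Example: _plugboard("POLAND") pairs adjacent letters both ways, giving
# {'P': 'O', 'O': 'P', 'L': 'A', 'A': 'L', 'N': 'D', 'D': 'N'}.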
def enigma( text , rotor_position , rotor_selection = (rotor1, rotor2, rotor3) , plugb = "" , ) -> str:
    text = text.upper()
    rotor_position , rotor_selection , plugboard = _validator(
        rotor_position , rotor_selection , plugb.upper() )
    rotorpos1 , rotorpos2 , rotorpos3 = rotor_position
    rotor_a , rotor_b , rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol ) + rotorpos1
            symbol = rotor_a[index % len(abc )]
            # rotor rb --------------------------
            index = abc.index(symbol ) + rotorpos2
            symbol = rotor_b[index % len(abc )]
            # rotor rc --------------------------
            index = abc.index(symbol ) + rotorpos3
            symbol = rotor_c[index % len(abc )]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor_c.index(symbol ) - rotorpos3]
            symbol = abc[rotor_b.index(symbol ) - rotorpos2]
            symbol = abc[rotor_a.index(symbol ) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc ):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc ):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc ):
                rotorpos3 = 0
        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')
        result.append(symbol )
    return "".join(result )
if __name__ == "__main__":
    message = """This is my Python script that emulates the Enigma machine from WWII."""
    rotor_pos = (1, 1, 1)
    pb = """pictures"""
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 368 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = """Hello, World!"""
SAMPLE_LANGUAGE = """en_XX"""
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Dict:
UpperCAmelCase : Dict = Path("""data_bin""" )
UpperCAmelCase : Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowercase ).parent ) , checkpoint_file=Path(_lowercase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowercase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowercase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(_lowercase )
UpperCAmelCase : List[str] = xmod.model.encoder.sentence_encoder
UpperCAmelCase : Tuple = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
UpperCAmelCase : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _lowercase )
UpperCAmelCase : str = XmodForSequenceClassification(_lowercase ) if classification_head else XmodForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCAmelCase : Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
UpperCAmelCase : int = xmod_sent_encoder.embed_positions.weight
UpperCAmelCase : int = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
UpperCAmelCase : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
UpperCAmelCase : Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
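# Example invocation (a sketch only; the script filename and all paths are hypothetical,
# and a fairseq X-MOD checkpoint is assumed to exist locally):
#
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path /path/to/xmod/model.pt \
#       --pytorch_dump_folder_path ./converted-xmod \
#       --classification_head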
| 338 | 0 |
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
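# Note on the encoding scheme above: arrow keys are stored as their escape-sequence
# byte (65-68 for A-D) plus ARROW_KEY_FLAG (1 << 8 = 256), so e.g. KEYMAP["up"] is 321.
# This keeps them out of the 0-255 range of ordinary characters, which is why the
# Windows key map and the reader below subtract ARROW_KEY_FLAG when matching raw bytes.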
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
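# Minimal usage sketch (assumes an interactive terminal; uses only names defined in
# this module). get_character() returns a one-character string for printable keys and
# arrows, but KEYMAP["undefined"] (an int) for unrecognized input, hence the check:
#
#   key = get_character()
#   if isinstance(key, str) and ord(key) == KEYMAP["up"]:
#       print("up arrow")
#   else:
#       print("got:", repr(key))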
| 172 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """
    Return the root-mean-square speed of a gas molecule, in m/s.

    >>> round(rms_speed_of_molecule(300, 0.028), 1)
    517.0
    """
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: nitrogen (N2) at 300 K; molar mass must be in kg/mol, so N2 is 0.028
    temperature = 300
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
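# The function above implements the Maxwell-Boltzmann RMS speed,
#     v_rms = sqrt(3 * R * T / M),
# with R in J/(mol*K), T in kelvin and M in kg/mol. Worked example for N2 at 300 K:
#     sqrt(3 * 8.3144598 * 300 / 0.028) ~= 517 m/s,
# which matches the value printed by the example run above.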
| 172 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
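# Minimal usage sketch (class names as restored above; argument values are
# illustrative, not checkpoint defaults):
#
#   config = XLMRobertaConfig(vocab_size=250002, hidden_size=768)
#   onnx_config = XLMRobertaOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict mapping input names to dynamic axes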
| 364 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5] ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
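# These tests are normally collected by pytest; a typical invocation (hypothetical
# repository-relative path) would be:
#
#   pytest tests/models/donut/test_image_processing_donut.py -k "image_processor"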
| 51 | 0 |