# BEiT model __init__: declares the lazy import structure so heavy submodules
# are only imported when first used.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
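# The snippet above only *declares* what each submodule exports. Below is a
# minimal sketch — my own illustration, not the transformers implementation —
# of how a lazy module can defer the actual imports until an attribute is
# first accessed.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name back to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # only called when normal attribute lookup fails
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value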
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : Tuple = {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'xlnet'
SCREAMING_SNAKE_CASE = ['mems']
SCREAMING_SNAKE_CASE = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: List[str]=3_2000 , _SCREAMING_SNAKE_CASE: Union[str, Any]=1024 , _SCREAMING_SNAKE_CASE: Dict=24 , _SCREAMING_SNAKE_CASE: Optional[int]=16 , _SCREAMING_SNAKE_CASE: List[str]=4096 , _SCREAMING_SNAKE_CASE: Tuple="gelu" , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: List[Any]="bi" , _SCREAMING_SNAKE_CASE: Any=0.02 , _SCREAMING_SNAKE_CASE: List[str]=1e-12 , _SCREAMING_SNAKE_CASE: Any=0.1 , _SCREAMING_SNAKE_CASE: Any=512 , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: Any=True , _SCREAMING_SNAKE_CASE: Any=False , _SCREAMING_SNAKE_CASE: Tuple=False , _SCREAMING_SNAKE_CASE: List[Any]=-1 , _SCREAMING_SNAKE_CASE: Union[str, Any]=False , _SCREAMING_SNAKE_CASE: Optional[int]="last" , _SCREAMING_SNAKE_CASE: Tuple=True , _SCREAMING_SNAKE_CASE: Dict="tanh" , _SCREAMING_SNAKE_CASE: int=0.1 , _SCREAMING_SNAKE_CASE: Tuple=5 , _SCREAMING_SNAKE_CASE: List[str]=5 , _SCREAMING_SNAKE_CASE: Optional[Any]=5 , _SCREAMING_SNAKE_CASE: Optional[int]=1 , _SCREAMING_SNAKE_CASE: Tuple=2 , **_SCREAMING_SNAKE_CASE: str , ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = vocab_size
__lowerCAmelCase : Optional[int] = d_model
__lowerCAmelCase : Union[str, Any] = n_layer
__lowerCAmelCase : List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(F"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""")
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F"""`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})""")
__lowerCAmelCase : Optional[Any] = d_model // n_head
__lowerCAmelCase : Tuple = ff_activation
__lowerCAmelCase : Union[str, Any] = d_inner
__lowerCAmelCase : Any = untie_r
__lowerCAmelCase : int = attn_type
__lowerCAmelCase : Any = initializer_range
__lowerCAmelCase : List[Any] = layer_norm_eps
__lowerCAmelCase : int = dropout
__lowerCAmelCase : List[Any] = mem_len
__lowerCAmelCase : str = reuse_len
__lowerCAmelCase : Any = bi_data
__lowerCAmelCase : Optional[int] = clamp_len
__lowerCAmelCase : Union[str, Any] = same_length
__lowerCAmelCase : List[str] = summary_type
__lowerCAmelCase : Optional[int] = summary_use_proj
__lowerCAmelCase : Tuple = summary_activation
__lowerCAmelCase : Dict = summary_last_dropout
__lowerCAmelCase : str = start_n_top
__lowerCAmelCase : List[str] = end_n_top
__lowerCAmelCase : str = bos_token_id
__lowerCAmelCase : Optional[Any] = pad_token_id
__lowerCAmelCase : Optional[Any] = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead." , _SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = kwargs["use_cache"]
__lowerCAmelCase : Optional[int] = use_mems_eval
__lowerCAmelCase : Any = use_mems_train
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
@property
def _SCREAMING_SNAKE_CASE ( self: Any) -> int:
"""simple docstring"""
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
return -1
@max_position_embeddings.setter
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Optional[int]) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""") | 293 | 0 |
# Activation-function lookup helper (diffusers-style).
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Return an activation module for the given name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
# Count the number of ways to assign N tasks to M persons using bitmask DP.
from collections import defaultdict


class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't take this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
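# Complexity note: the memoised recursion touches at most 2**M masks (M = number
# of persons) for each of the N + 1 task indices, so time and space are O(N * 2**M).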
# Randomised quickselect: find the k-th smallest element in expected linear time.
from __future__ import annotations

from random import choice


def random_pivot(lst):
    """Choose a random element of the list as the pivot."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of ``lst`` (1-indexed).

    Assumes distinct elements: values equal to the pivot (other than the pivot
    itself) are dropped by the partition below.
    """
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
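# Usage sketch (elements must be distinct, see the docstring note):
#     kth_number([9, 2, 7, 4, 6], 3)  # -> 6, since sorted order is [2, 4, 6, 7, 9]
#     kth_number([9, 2, 7, 4, 6], 1)  # -> 2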
# Tokenization classes for Ernie-M.
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    # Ernie-M model doesn't have token_type embedding.
    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
# Decision Transformer model configuration.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4_096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1_024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
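# Usage sketch: the attribute_map above lets generic code read GPT-2-style names.
#     config = DecisionTransformerConfig(state_dim=11, act_dim=3)
#     config.num_hidden_layers  # resolves to config.n_layer (3) via attribute_map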
# Export a BART model plus beam search to an ONNX graph and validate the export.
import argparse
import logging
import os
import sys

import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers

import transformers
from transformers import BartForConditionalGeneration, BartTokenizer


logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)

logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()

    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)

    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
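# Example invocation (illustrative; assumes the `bart_onnx` helper package is
# on PYTHONPATH and this file is saved locally):
#     python <this_script>.py --model_name_or_path facebook/bart-base --device cpu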
# A two-cluster self-organizing map (Kohonen network) trained on toy samples.
import math


class SelfOrganizingMap:
    def get_winner(self, weights, sample):
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights, sample, j, alpha):
        """Update the winning vector."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
# Dynamic programming: all the ways a target string can be constructed from a
# word bank of substrings.
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every combination of word-bank entries that concatenates to ``target``."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
# Verification utilities for downloaded files and dataset splits (datasets-style).
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
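# Usage sketch for verify_checksums (hypothetical URL and checksum values):
#     expected = {"https://example.com/data.zip": {"num_bytes": 1024, "checksum": "abc"}}
#     recorded = {"https://example.com/data.zip": {"num_bytes": 1024, "checksum": "abc"}}
#     verify_checksums(expected, recorded, verification_name="demo")  # matches, logs success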
# Check whether a string can be rearranged into a palindrome, two ways, with a
# small benchmark comparing them.
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    # A string can be rearranged into a palindrome iff at most one character
    # has an odd frequency.
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark code comparing the two functions above."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
A = datasets.logging.get_logger(__name__)
A = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
A = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
A = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
def a_ ( self : Optional[int]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence"),
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
def a_ ( self : str , UpperCamelCase_ : List[Any]):
"""simple docstring"""
if self.config_name == "default":
__UpperCAmelCase : str = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
else:
__UpperCAmelCase : Dict = comet.load_from_checkpoint(comet.download_model(self.config_name))
def a_ ( self : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Tuple=False):
"""simple docstring"""
if gpus is None:
__UpperCAmelCase : Optional[Any] = 1 if torch.cuda.is_available() else 0
__UpperCAmelCase : List[str] = {"src": sources, "mt": predictions, "ref": references}
__UpperCAmelCase : str = [dict(zip(UpperCamelCase_ , UpperCamelCase_)) for t in zip(*data.values())]
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.scorer.predict(UpperCamelCase_ , gpus=UpperCamelCase_ , progress_bar=UpperCamelCase_)
return {"mean_score": mean_score, "scores": scores}
# Convert OpenAI Jukebox checkpoints into the transformers JukeboxModel layout.
import argparse
import json
import os
from pathlib import Path

import requests
import torch

from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}


def replace_key(key):
    # Map OpenAI checkpoint key names onto the transformers module layout.
    # For illustration: "vqvae.bottleneck.level_blocks.0.k" becomes
    # "vqvae.bottleneck.level_blocks.0.codebook".
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key


def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict


@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
# Speech processor class for Speech2Text: wraps a feature extractor and a
# tokenizer into a single processor.
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
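# Usage sketch (model identifier is illustrative; requires network access):
#     from transformers import Speech2TextProcessor
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     batch = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#     labels = processor(text=transcript)  # tokenizes the transcript instead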
# Integration tests for the datasets inspect/info helper functions.
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 652 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
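# Pix2Struct uses the composite-config pattern: the text and vision configs are
# defined separately, then bundled by a top-level config whose classmethods can
# extract either sub-config from a full "pix2struct" checkpoint.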
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 639 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
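# Backwards-compatibility shims: re-export a few symbols on their old module
# paths so code importing the deprecated locations keeps working.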
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 639 | 1 |
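# Project Euler problem 12: find the first triangle number with more than 500
# divisors. The divisor count comes from the prime factorization, since
# d(p1^a1 * ... * pk^ak) = (a1 + 1) * ... * (ak + 1).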
def triangle_number_generator():
    """Generates the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Counts the divisors of `n` from its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
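# Configuration for PoolFormer, which replaces attention with simple pooling;
# the per-stage lists below (depths, hidden sizes, patch sizes, strides,
# padding) each carry one entry per encoder block.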
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
| 413 | 0 |
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
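# The safety checker scores CLIP image embeddings against fixed "concept"
# embeddings via cosine similarity; an image is flagged when any adjusted
# score exceeds zero.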
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 621 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
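# RoBERTa-PreLayerNorm shares RoBERTa's hyperparameters; only the placement of
# layer norm (before each sub-layer) differs, so the config is a near-copy.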
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 315 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
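# Standard image-processor test layout: a small "tester" class holds the
# hyperparameters and expected shapes, and the actual TestCase exercises PIL,
# NumPy, and PyTorch inputs against it.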
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
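# Lazy-module boilerplate: `_import_structure` maps submodules to their public
# names so heavy imports (the torch-backed modeling code) only happen on first use.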
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 243 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
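# Checkpoint conversion works in two passes: `replace_key` rewrites individual
# OpenAI parameter names into the `transformers` naming scheme, and
# `fix_jukebox_keys` applies regex-based renames for the block-structured
# encoder/decoder/conditioner weights.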
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    # Download the raw OpenAI checkpoints listed in MODEL_MAPPING if needed.
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        # Expand the shorthand ".b"/".w" suffixes before the regex renaming pass.
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
UpperCAmelCase_ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 2 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
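# The tiny BPE vocab/merges written in `setUp` below are just enough to
# tokenize "lower" into ["low", "er</w>"], which keeps the test hermetic.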
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 100 | 0 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
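# Helpers for pulling artifacts from the latest scheduled (daily) CI run of
# huggingface/transformers via the GitHub Actions REST API.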
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and read the contents of the artifacts of the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
| 620 |
| 620 | 1 |
import argparse
import os
import re
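# Sorts the entries of `_import_structure` dictionaries in `__init__.py` files
# so lazy-import blocks stay alphabetized; typically run via `make style`.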
a_ = """src/diffusers"""
# Pattern that looks at the indentation in a line.
a_ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
a_ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
a_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
a_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
a_ = re.compile(r"""\[([^\]]+)\]""")
def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks at the given `indent_level`, optionally bounded by prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a key function so that sorting is case-insensitive and ignores underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort `objects` isort-style: constants first, classes second, functions last."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Sorts the object names inside a single `_import_structure` statement."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` entries of one init file; returns True if it would change."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 221 | def sum_digits(num: int) -> int:
    """Returns the sum of the decimal digits of `num`."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Sum of digits in the numerator of the 100th convergent of the continued fraction for e."""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
| 221 | 1 |
"""simple docstring"""
import math
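# A minimal two-cluster self-organizing map (Kohonen network): the winner is
# the weight vector closest to the sample in Euclidean distance, and only the
# winner is pulled toward the sample by the learning rate `alpha`.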
class SelfOrganizingMap:
    def get_winner(self, weights, sample):
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # the winner is the weight vector with the smaller distance
        return 0 if d0 < d1 else 1

    def update(self, weights, sample, j, alpha):
        """Pull the winning weight vector `j` toward the sample."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| 295 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
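# Interactive helpers for `accelerate config`: `_ask_field` re-prompts until
# the input converts cleanly, and the `_convert_*` functions map a numeric
# menu choice onto the corresponding enum value.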
def _lowerCAmelCase ( lowerCamelCase__ : Dict, lowerCamelCase__ : int=None, lowerCamelCase__ : Any=None, lowerCamelCase__ : Any=None ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[Any] = True
while ask_again:
_SCREAMING_SNAKE_CASE : List[str] = input(lowerCamelCase__ )
try:
if default is not None and len(lowerCamelCase__ ) == 0:
return default
return convert_value(lowerCamelCase__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowerCamelCase__ )
def _lowerCAmelCase ( lowerCamelCase__ : Any, lowerCamelCase__ : Dict=[], lowerCamelCase__ : Optional[int]=None, lowerCamelCase__ : str=0 ) -> str:
_SCREAMING_SNAKE_CASE : int = BulletMenu(lowerCamelCase__, lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : str = menu.run(default_choice=lowerCamelCase__ )
return convert_value(lowerCamelCase__ ) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the redundant usage line from subcommand help messages.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
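
# Illustrative wiring (not part of the original module): this mirrors how
# `accelerate config` chains a prompt helper with one of the converters above.
#
#   mixed_precision = _ask_options(
#       "Do you wish to use FP16 or BF16 (mixed precision)?",
#       ["no", "fp16", "bf16", "fp8"],
#       _convert_mixed_precision,
#   )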
| 295 | 1 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
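
# Expected input shape, inferred from the accesses above (values illustrative):
#   {"benchmarks/load.py": {"load_time": {"new": 1.23, "old": 1.50, "diff": -0.27}}}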
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file) | 32 |
'''simple docstring'''
from math import factorial, pi
def maclaurin_sin(theta, accuracy: int = 30):
    """
    Finds the Maclaurin approximation of sin(theta).
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta, accuracy: int = 30):
    """
    Finds the Maclaurin approximation of cos(theta).
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
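
# Both functions evaluate a truncated Maclaurin series after reducing theta
# modulo 2*pi:
#   sin(theta) = sum_{r >= 0} (-1)^r * theta^(2r + 1) / (2r + 1)!
#   cos(theta) = sum_{r >= 0} (-1)^r * theta^(2r)     / (2r)!
# Worked check with accuracy=3: sin(1) ~ 1 - 1/6 + 1/120 = 0.84167 (true: 0.84147).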
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5))
| 601 | 0 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(possible_board: list[int], diagonal_right_collisions: list[int], diagonal_left_collisions: list[int], boards: list[list[str]], n: int) -> None:
    # Get the next row of the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row equals the size of the board, there is a queen in every row of the
    # current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results
    for col in range(n):
        # We apply what we learned previously. First we check that the column value
        # does not already appear in the current board (possible_board), because a
        # repeated value means a collision in the vertical direction. Then we apply
        # the two formulas we learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist
        # in their respective variables (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks is True there is a collision, so we continue to the
        # next value in the for loop.
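        #
        # Concrete check (added for illustration): queens at (row=1, col=3) and
        # (row=2, col=2) collide on the 135º diagonal because 1 + 3 == 2 + 2.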
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again and update the inputs
        depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n)


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('')

    print(len(boards), 'solutions were found.')
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4) | 338 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = 'ybelkada/fonts'
def _check_torch_version() -> None:
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
            'Pix2StructImageProcessor. Please upgrade torch.')


def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ['torch'])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width)
    return patches.unsqueeze(0)
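
# Shape walk-through for torch_extract_patches (illustrative): a (1, C, H, W)
# input unfolds into (H // patch_height) * (W // patch_width) patches, each of
# length C * patch_height * patch_width, and is returned with shape
# (1, H // patch_height, W // patch_width, C * patch_height * patch_width).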
def render_text(text: str, text_size: int = 36, text_color: str = "black", background_color: str = "white", left_padding: int = 5, right_padding: int = 5, top_padding: int = 5, bottom_padding: int = 5, font_bytes: Optional[bytes] = None, font_path: Optional[str] = None) -> Image.Image:
    requires_backends(render_text, 'vision')
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = '\n'.join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, 'Arial.TTF')
    font = ImageFont.truetype(font, encoding='UTF-8', size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new('RGB', (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new('RGB', (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
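
# Illustrative call (hypothetical values): render_text('Why is the sky blue?',
# text_size=36) returns a white PIL image with the wrapped question drawn in
# black, ready to be stacked on top of an input image by render_header().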
def render_header(image: np.ndarray, header: str, **kwargs) -> np.ndarray:
    requires_backends(render_header, 'vision')

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new('RGB', (new_width, new_height + new_header_height), 'white')
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Pix2Struct image processor.
    """

    model_input_names = ['flattened_patches']

    def __init__(self, do_convert_rgb: bool = True, do_normalize: bool = True, patch_size: Dict[str, int] = None, max_patches: int = 2048, is_vqa: bool = False, **kwargs) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {'height': 16, 'width': 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, 'torch')
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size['height'], patch_size['width']
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=False, antialias=True).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
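
    # Layout of each flattened patch produced above (illustrative):
    #   [row_id, col_id, pixel_0, ..., pixel_{patch_height * patch_width * C - 1}]
    # Rows are padded with zeros up to `max_patches`; the zero rows double as the
    # padding signal used to build the attention mask in preprocess().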
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean and std across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(self, images: ImageInput, header_text: Optional[str] = None, do_convert_rgb: bool = None, do_normalize: Optional[bool] = None, max_patches: Optional[int] = None, patch_size: Optional[Dict[str, int]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get('data_format', None) is not None:
            raise ValueError('data_format is not an accepted input as the outputs are returned as flattened patches.')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError('A header text must be provided for VQA models.')
            font_bytes = kwargs.pop('font_bytes', None)
            font_path = kwargs.pop('font_path', None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=return_tensors)

        return encoded_outputs
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """
    Wrapper around `tqdm.tqdm` that optionally displays the progress bar only on the main process.
    """
    if not is_tqdm_available():
        raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.')
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable) | 561 |
'''simple docstring'''
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
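
# DP recurrence implemented above, for the cost of multiplying A_a ... A_b:
#   matrix[a][b] = min over a <= c < b of
#                  matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
# sol[a][b] stores the split point c that achieves the minimum.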
def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print('A' + str(i), end=' ')
    else:
        print('(', end=' ')
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(')', end=' ')
def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print('No. of Operation required: ' + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main() | 561 | 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''}) - {'''image''', '''width''', '''height'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''}) - {'''image'''}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'), up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'), cross_attention_dim=32, attention_head_dim=4)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
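
    # The miniature UNet/VAE/CLIP configurations above keep the pipeline graph
    # identical to the full model while making every forward pass cheap enough
    # for CPU-only unit tests; the exact sizes are arbitrary test fixtures.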
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'video': video,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed')
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.')
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL', torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device='cpu').manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to('cuda')

        prompt = 'Spiderman is surfing'

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type='pt').frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 701 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken('<mask>', lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token})
        tokenizer.add_tokens(['<ctc_blank>'])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-4], 'œ')
        self.assertEqual(vocab_keys[-2], '<mask>')
        self.assertEqual(vocab_keys[-1], '<ctc_blank>')
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ['aaaaa bbbbbb', 'cccccccccdddddddd']
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l', add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l', add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
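
    # Note on the assertions above (illustrative): ids of newly added tokens are
    # appended after the base vocabulary, so any id greater than vocab_size - 1
    # must come from `add_tokens`/`add_special_tokens`, which is exactly what the
    # assertGreater checks verify.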
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize('This is a test')
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6])

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
@slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
        expected_encoding = {
'''input_ids''': [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='microsoft/speecht5_asr', revision='c5ef64c71905caeccde0e4462ef3f9077224c524', sequences=sequences)
| 148 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/bigbird-roberta-base""": 4_0_9_6,
"""google/bigbird-roberta-large""": 4_0_9_6,
"""google/bigbird-base-trivia-itc""": 4_0_9_6,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    """
    Construct a BigBird tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep
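
    # Resulting layouts (illustrative):
    #   single sequence: [CLS] X [SEP]
    #   pair of sequences: [CLS] A [SEP] B [SEP]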
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
| 199 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
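
# Circuit sketch for the default n = 3 (derived from the loops above): H on q2,
# then CP(pi/4) controlled by q0 and CP(pi/2) controlled by q1 targeting q2;
# H on q1 with CP(pi/2) from q0; H on q0; finally q0 and q2 are swapped to undo
# the bit-reversed ordering before measurement.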
if __name__ == "__main__":
print(
F'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
| 199 | 1 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_shape_file'])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_pronunciation_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.word_shape_file, 'w', encoding='utf-8') as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, 'w', encoding='utf-8') as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize('你好[SEP]你是谁')
        self.assertListEqual(tokens, ['你', '好', '[SEP]', '你', '是', '谁'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
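
    # RoCBert keeps three parallel id spaces (token, shape, pronunciation); the
    # identical id lists asserted above are an artifact of the tiny fixture in
    # setUp(), which maps every token to the same index in all three files.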
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token='[UNK]')

        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(' '))
        self.assertTrue(_is_whitespace('\t'))
        self.assertTrue(_is_whitespace('\r'))
        self.assertTrue(_is_whitespace('\n'))
        self.assertTrue(_is_whitespace('\u00A0'))

        self.assertFalse(_is_whitespace('A'))
        self.assertFalse(_is_whitespace('-'))

    def test_is_control(self):
        self.assertTrue(_is_control('\u0005'))

        self.assertFalse(_is_control('A'))
        self.assertFalse(_is_control(' '))
        self.assertFalse(_is_control('\t'))
        self.assertFalse(_is_control('\r'))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation('-'))
        self.assertTrue(_is_punctuation('$'))
        self.assertTrue(_is_punctuation('`'))
        self.assertTrue(_is_punctuation('.'))

        self.assertFalse(_is_punctuation('A'))
        self.assertFalse(_is_punctuation(' '))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True)

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def test_change_tokenize_chinese_chars( self ):
        list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
        text_with_chinese_char = ''''''.join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                kwargs['''tokenize_chinese_chars'''] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs['''tokenize_chinese_chars'''] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        text = tokenizer.encode('''你好''' , add_special_tokens=False )
        text_a = tokenizer.encode('''你是谁''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
    def test_prepare_for_model( self ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                sequence = '''你好,你是谁'''
                tokens = tokenizer.tokenize(sequence )
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens )
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens )
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens )
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids , tokens_shape_ids , tokens_proun_ids , add_special_tokens=True )
                input_dict = tokenizer.encode_plus(sequence , add_special_tokens=True )
                self.assertEqual(prepared_input_dict , input_dict )
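# A self-contained sketch (added; toy vocab, not RoCBert's real one) of the
# greedy longest-match-first rule that the wordpiece test above exercises.
def _greedy_wordpiece(word , vocab , unk="[UNK]" ):
    pieces , start = [], 0
    while start < len(word ):
        end = len(word )
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                pieces.append(piece )
                break
            end -= 1
        else:  # no substring matched: the whole word maps to the unknown token
            return [unk]
        start = end
    return pieces
assert _greedy_wordpiece("unwanted" , {"un", "##want", "##ed"} ) == ["un", "##want", "##ed"]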
| 700 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig( PretrainedConfig ):
'''simple docstring'''
_lowerCAmelCase = "esm"
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1026 , initializer_range=0.02 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''' )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , False ):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
    def to_dict( self ):
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output['''esmfold_config'''] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
'''simple docstring'''
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm_head: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__( self ):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict( self ):
        output = asdict(self )
        output['''trunk'''] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
'''simple docstring'''
    num_blocks: int = 48
    sequence_state_dim: int = 1_024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__( self ):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                F" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'''
                F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'''
                F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
        if self.dropout >= 0.4:
            raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
    def to_dict( self ):
        output = asdict(self )
        output['''structure_module'''] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
'''simple docstring'''
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict( self ):
        return asdict(self )
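# Illustration (added; not part of the original file) of the TrunkConfig
# invariants validated above: the default sequence_state_dim of 1024 splits
# evenly into 32 attention heads of width 32.
_example_trunk = TrunkConfig()
assert _example_trunk.sequence_state_dim // _example_trunk.sequence_head_width == 32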
def get_default_vocab_list( ):
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
) | 55 | 0 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'''add_prefix_space''': True}
    test_seq2seq = False
    def setUp( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''lower newer'''
        bpe_tokens = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text , add_prefix_space=True )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_rust_and_python_full_tokenizers( self ):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        sequence = '''lower newer'''
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence , add_prefix_space=True )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        ids = tokenizer.encode(sequence , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_pretokenized_inputs( self , *args , **kwargs ):
'''simple docstring'''
pass
    def test_padding( self , max_length=1_5 ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = '''This is a simple input'''
                sa = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                pa = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , sa , max_length=max_length , padding='''max_length''' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , pa , max_length=max_length , padding='''max_length''' , )
    def test_padding_if_pad_token_set_slow( self ):
'''simple docstring'''
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
        # Simple input
        s = '''This is a simple input'''
        sa = ['''This is a simple input looooooooong''', '''This is a simple input''']
        p = ('''This is a simple input''', '''This is a pair''')
        pa = [
            ('''This is a simple input loooooong''', '''This is a simple input'''),
            ('''This is a simple pair loooooong''', '''This is a simple pair'''),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s , padding='''max_length''' , max_length=3_0 , return_tensors='''np''' )
        out_sa = tokenizer(sa , padding=True , truncate=True , return_tensors='''np''' )
        out_p = tokenizer(*p , padding='''max_length''' , max_length=6_0 , return_tensors='''np''' )
        out_pa = tokenizer(pa , padding=True , truncate=True , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
    def test_add_bos_token_slow( self ):
'''simple docstring'''
        bos_token = '''$$$'''
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=bos_token , add_bos_token=True )
        s = '''This is a simple input'''
        sa = ['''This is a simple input 1''', '''This is a simple input 2''']
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s )
        out_sa = tokenizer(sa )
        self.assertEqual(out_s.input_ids[0] , bos_token_id )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        decode_s = tokenizer.decode(out_s.input_ids )
        decode_sa = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , bos_token )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
    def test_truncation( self ):
'''simple docstring'''
        tokenizer = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
        text = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
        expected_truncated_text = '''\nif len_a > len_b: result = a\nelse: result = b'''
        input_ids = tokenizer.encode(text )
        truncate_before_pattern = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
        decoded_text = tokenizer.decode(input_ids , truncate_before_pattern=truncate_before_pattern )
        self.assertEqual(decoded_text , expected_truncated_text )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
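# A minimal sketch (added; hypothetical helper, not CodeGen's implementation)
# of the truncate_before_pattern idea tested above: cut decoded text at the
# earliest match of any stop pattern.
def _truncate_before_patterns(text , patterns ):
    cuts = [m.start() for p in patterns for m in [re.search(p , text , re.MULTILINE )] if m]
    return text[: min(cuts )] if cuts else text

assert _truncate_before_patterns("a = 1\n# comment" , ["^#"] ) == "a = 1\n"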
| 212 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''', [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''', num_bytes=1337, num_examples=42, dataset_name='''my_dataset''')}),
SplitDict({'''train''': SplitInfo(name='''train''', num_bytes=1337, num_examples=42)}),
SplitDict({'''train''': SplitInfo()}),
], )
def test_split_dict_to_yaml_list(split_dict : SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    '''split_info''', [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='''my_dataset''')])
def test_split_dict_asdict_has_dataset_name(split_info : SplitInfo):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'''train''': split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
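# A standalone sketch (added; plain dicts, not SplitDict) of the round-trip
# property exercised by the first test above.
def _to_items(d):
    return [{"name": k, **v} for k, v in d.items()]

def _from_items(items):
    return {item["name"]: {k: v for k, v in item.items() if k != "name"} for item in items}

_d = {"train": {"num_examples": 42}}
assert _from_items(_to_items(_d)) == _d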
| 212 | 1 |
import math
from collections.abc import Callable
def intersection(function : Callable[[float], float] , x0 : float , x1 : float ) -> float:
    '''simple docstring'''
    x_n : float = x0
    x_n1 : float = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError("float division by zero, could not find root" )
        x_n2 : float = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n ))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
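# Worked usage (added) of the secant iteration above: the root of
# g(x) = x**2 - 2 is sqrt(2) ~= 1.41421.
def _g(x: float) -> float:
    return x * x - 2

assert abs(intersection(_g , 1.0 , 2.0 ) - 2 ** 0.5 ) < 1e-4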
def f(x : float ) -> float:
    '''simple docstring'''
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
    print(intersection(f, 3, 3.5)) | 580 | def neville_interpolate (x_points : list , y_points : list , x0 : int ) -> list:
    '''simple docstring'''
    n : int = len(x_points )
    q : list = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
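# Worked usage (added) of neville_interpolate above: sampling y = x**2 at four
# points and evaluating at x = 5 reproduces 25 exactly.
assert abs(neville_interpolate([1, 2, 3, 4] , [1, 4, 9, 16] , 5 )[0] - 25.0 ) < 1e-9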
if __name__ == "__main__":
import doctest
doctest.testmod() | 580 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
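# A minimal standalone sketch (added; hypothetical, heavily simplified) of the
# lazy-import pattern used above: attribute access resolves the defining
# submodule on first use instead of importing everything eagerly.
import importlib
import types

class _LazySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name back to the submodule that defines it
        self._object_to_module = {obj: mod for mod, objs in import_structure.items() for obj in objs}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._object_to_module[attr], self.__name__)
        return getattr(module, attr)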
| 41 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bert_fast'''] = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bert'''] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_bert'''] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bert_tf'''] = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_bert'''] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
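# A standalone sketch (added; hypothetical probe and names) of the
# optional-dependency gating pattern above: backend-specific exports are only
# registered when the backend can actually be imported.
import importlib.util

def _backend_available(module_name):
    return importlib.util.find_spec(module_name) is not None

_sketch_import_structure = {"configuration": ["Config"]}
if _backend_available("tokenizers"):
    _sketch_import_structure["tokenization_fast"] = ["TokenizerFast"]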
| 677 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = """src/transformers"""
# Matches is_xxx_available()
_re_backend = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
_re_try = re.compile(R"""^\s*try:""")
# Catches a line with else:
_re_else = re.compile(R"""^\s*else:""")
def find_backend( _snake_case ):
    if _re_test_backend.search(_snake_case ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(_snake_case )]
    backends.sort()
    return "_and_".join(backends )
def parse_init( _snake_case ):
with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCAmelCase = f.readlines()
lowerCAmelCase = 0
while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_snake_case ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_snake_case ):
lowerCAmelCase = _re_one_line_import_struct.search(_snake_case ).groups()[0]
lowerCAmelCase = re.findall('''\[([^\]]+)\]''' , _snake_case )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
lowerCAmelCase = _re_import_struct_key_value.search(_snake_case )
if single_line_import_search is not None:
lowerCAmelCase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
lowerCAmelCase = lines[line_index]
if _re_import_struct_add_one.search(_snake_case ) is not None:
objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] )
elif _re_import_struct_add_many.search(_snake_case ) is not None:
lowerCAmelCase = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' )
lowerCAmelCase = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_between_brackets.search(_snake_case ) is not None:
lowerCAmelCase = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' )
lowerCAmelCase = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_quote_object.search(_snake_case ) is not None:
objects.append(_re_quote_object.search(_snake_case ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase = []
while (
line_index < len(_snake_case )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
lowerCAmelCase = lines[line_index]
lowerCAmelCase = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_snake_case ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
lowerCAmelCase = lines[line_index]
lowerCAmelCase = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects , type_hint_objects ):
    def find_duplicates(objects ):
        return [k for k, v in collections.Counter(objects ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = '''base imports''' if key == '''none''' else F"""{key} backend"""
            errors.append(F"""Differences for {name}:""" )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
    return errors
def check_all_inits( ):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , '''__init__.py''' )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
                    failures.append('''\n'''.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('''\n\n'''.join(failures ) )
def get_transformers_submodules( ):
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_''' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('''*.py''' ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , '''.''' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
            if len(submodule.split('''.''' ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
    """convert_pytorch_checkpoint_to_tf2""",
    """modeling_flax_pytorch_utils""",
]
def check_submodules( ):
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        '''transformers''' , os.path.join(PATH_TO_TRANSFORMERS , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
        raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
            F"""{list_of_modules}\n"""
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 708 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text ):
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(''' ''' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(F"""{round(-1 * my_fir_sum ):.1f}""" )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(F"""{round(-1 * my_sec_sum ):.1f}""" )
    # print the difference between them
    print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def analyze_text( text ):
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
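# Quick numeric check (added) of the first-order entropy formula used above:
# a fair two-symbol source has exactly 1 bit of entropy.
assert abs(-(0.5 * math.log2(0.5 ) + 0.5 * math.log2(0.5 )) - 1.0 ) < 1e-12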
| 33 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self : Optional[Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE__ : List[str] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
SCREAMING_SNAKE_CASE__ : Dict = CLIPTextModel(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
SCREAMING_SNAKE_CASE__ : List[str] = 77
SCREAMING_SNAKE_CASE__ : int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : List[Any]=0 ):
if str(_lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : Tuple = torch.manual_seed(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : str = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self : Optional[Any] ):
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def lowercase__ ( self : Dict ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components()
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] = RobertaSeriesModelWithTransformation(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = text_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] = AltDiffusionPipeline(**_lowercase )
SCREAMING_SNAKE_CASE__ : Any = alt_pipe.to(_lowercase )
alt_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = '''A photo of an astronaut'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = alt_pipe(**_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images
SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Any = PNDMScheduler(skip_prk_steps=_lowercase )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE__ : Optional[int] = RobertaSeriesModelWithTransformation(_lowercase )
SCREAMING_SNAKE_CASE__ : str = text_encoder
SCREAMING_SNAKE_CASE__ : Any = AltDiffusionPipeline(**_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = alt_pipe.to(_lowercase )
alt_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(_lowercase )
SCREAMING_SNAKE_CASE__ : int = alt_pipe(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = output.images
SCREAMING_SNAKE_CASE__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def lowercase__ ( self : Dict ):
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ : Any = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = alt_pipe.to(_lowercase )
alt_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : str = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = alt_pipe([prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images
SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE__ : int = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Tuple = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=_lowercase , safety_checker=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = alt_pipe.to(_lowercase )
alt_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = alt_pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type='''numpy''' )
SCREAMING_SNAKE_CASE__ : Any = output.images
SCREAMING_SNAKE_CASE__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
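# Not part of the tests above: a toy sketch (added) of the classifier-free
# guidance combination that guidance_scale controls in these pipelines,
# noise = uncond + scale * (text - uncond), using dummy tensors.
_uncond = torch.zeros(2 , 4 )
_text = torch.ones(2 , 4 )
_guided = _uncond + 6.0 * (_text - _uncond)
assert torch.allclose(_guided , torch.full((2, 4) , 6.0 ) )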
| 35 |
'''simple docstring'''
import os
from collections.abc import Iterator
def good_file_paths( top_dir : str = "." ) -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        dir_names[:] = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip("""./""" )
def md_prefix( i : int ) -> str:
    return F'''{i * " "}*''' if i else "\n##"
def print_path( old_path : str , new_path : str ) -> str:
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(F'''{md_prefix(i )} {new_part.replace("_" , " " ).title()}''' )
    return new_path
def print_directory_md( top_dir : str = "." ) -> None:
    old_path = """"""
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath , filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path , filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = F'''{filepath}/{filename}'''.replace(""" """ , """%20""" )
        filename = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
        print(F'''{md_prefix(indent )} [{filename}]({url})''' )
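# Illustration (added) of md_prefix above: depth 0 opens a new "##" section,
# positive depths produce indented "*" bullets.
assert md_prefix(0 ) == "\n##"
assert md_prefix(2 ).endswith("*" )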
if __name__ == "__main__":
print_directory_md(".")
| 369 | 0 |
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters( with_config=True ):
"""simple docstring"""
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
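# Illustration (added) of the helper above: with_config=True yields one named
# test case per (dataset, config) pair.
assert list_datasets_on_hf_gcp_parameters()[0]["testcase_name"] == "wikipedia/20220301.de"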
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class TestDatasetOnHfGcp( TestCase ):
    dataset = None
    config_name = None
    def test_dataset_info_available( self , dataset , config_name ):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = """/""".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , """/""" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs( tmp_path_factory ):
    """simple docstring"""
    tmp_dir = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
    dataset_module = dataset_module_factory("""wikipedia""" ,cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs( tmp_path ):
    """simple docstring"""
    dataset_module = dataset_module_factory("""wikipedia""" ,cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path ,dataset=True )
    builder_instance = builder_cls(
        cache_dir=tmp_path ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds ,IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds["""train"""] ,IterableDataset )
    assert next(iter(ds["""train"""] ) )
| 712 | """simple docstring"""
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax( nn.Module ):
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )
                self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )
        self.keep_order = keep_order
    def _compute_logit( self , hidden , weight , bias , proj ):
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
        # else:
        # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
        # if bias is not None:
        # logit = logit + bias
        return logit
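    # Note (added): the forward pass below implements an adaptive softmax
    # (Grave et al., 2017). Frequent tokens are scored by a small "head"
    # softmax; rarer tokens are routed through per-cluster "tail" softmaxes,
    # so a tail token's log-probability is its cluster's head log-prob plus
    # its within-cluster tail log-prob.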
    def forward( self , hidden , labels=None , keep_order=False ):
if labels is not None:
# Shift so that tokens < n predict n
_UpperCAmelCase = hidden[..., :-1, :].contiguous()
_UpperCAmelCase = labels[..., 1:].contiguous()
_UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
_UpperCAmelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
_UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
_UpperCAmelCase = labels != -100
_UpperCAmelCase = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
_UpperCAmelCase = (
-nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
_UpperCAmelCase , _UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCAmelCase = self.out_layers[i].weight
_UpperCAmelCase = self.out_layers[i].bias
if i == 0:
_UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
if labels is None:
_UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
_UpperCAmelCase = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
_UpperCAmelCase = 0
_UpperCAmelCase = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
_UpperCAmelCase , _UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_UpperCAmelCase = (labels >= l_idx) & (labels < r_idx)
_UpperCAmelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_UpperCAmelCase = labels.index_select(0 , __lowerCAmelCase ) - l_idx
_UpperCAmelCase = head_logprob.index_select(0 , __lowerCAmelCase )
_UpperCAmelCase = hidden.index_select(0 , __lowerCAmelCase )
else:
_UpperCAmelCase = hidden
if i == 0:
if labels is not None:
_UpperCAmelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
_UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_UpperCAmelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
_UpperCAmelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_UpperCAmelCase = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , __lowerCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] ):
if self.n_clusters == 0:
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
_UpperCAmelCase , _UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCAmelCase = self.out_layers[i].weight
_UpperCAmelCase = self.out_layers[i].bias
if i == 0:
_UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
_UpperCAmelCase , _UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = head_logprob[:, -i] + tail_logprob_i
_UpperCAmelCase = logprob_i
return out
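# Minimal usage sketch (not part of the original module): score random hidden
# states with a 3-partition adaptive softmax. Vocabulary size, cutoffs and
# dimensions below are arbitrary illustration values.
if __name__ == "__main__":
    torch.manual_seed(0)
    crit = ProjectedAdaptiveLogSoftmax(n_token=10000, d_embed=64, d_proj=64, cutoffs=[2000, 6000])
    hidden = torch.randn(4, 8, 64)            # [batch, seq_len, d_proj]
    labels = torch.randint(0, 10000, (4, 8))  # token ids
    nll = crit(hidden, labels)                # per-token negative log-likelihood
    print(nll.shape, nll.mean().item())       # torch.Size([28]) after the one-step shift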
| 275 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 91 |
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string.

    >>> binary_and(3, 5)
    '0b001'
    >>> binary_and(25, 32)
    '0b000000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 0 |
def equation(x: float) -> float:
    """Evaluate f(x) = 10 - x^2, the function whose root we search for."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Find a root of ``equation`` in [a, b] by repeated interval halving."""
    # a root can only be bracketed if f(a) and f(b) have opposite signs
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
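# Convergence sketch: each pass halves [a, b], so with the 0.01 tolerance used
# above the loop runs about log2((b - a) / 0.01) times -- roughly 10 for [-2, 5].
def max_iterations(a: float, b: float, tol: float = 0.01) -> int:
    from math import ceil, log2

    return ceil(log2((b - a) / tol))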
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 375 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")

        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
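# Spot check for the key mapping above (hypothetical checkpoint key): the
# "encoder.model" prefix is stripped, "encoder." is re-prefixed for layer
# weights, and "attn.proj" becomes the HF attention output projection.
assert (
    rename_key("encoder.model.layers.0.blocks.0.attn.proj.weight")
    == "encoder.encoder.layers.0.blocks.0.attention.output.dense.weight"
)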
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    decoder_input_ids = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, decoder_input_ids, None).logits
    logits = model(pixel_values, decoder_input_ids=decoder_input_ids).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
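# Example invocation (a sketch; the script file name and output path below are
# placeholders, not taken from the original snippet):
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base \
#       --pytorch_dump_folder_path ./donut-base-hf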
| 375 | 1 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
| 192 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """Count the square laminae that use up to `limit` tiles (Project Euler 173)."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
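# Independent cross-check for small limits (assumes the same lamina rules:
# square outer border, centered square hole of matching parity, hole >= 1x1).
def brute_force(limit: int) -> int:
    count = 0
    outer = 3
    while 4 * (outer - 1) <= limit:  # the thinnest lamina needs 4 * (outer - 1) tiles
        hole = outer - 2
        while hole >= 1 and outer**2 - hole**2 <= limit:
            count += 1
            hole -= 2
        outer += 1
    return count


assert brute_force(100) == solution(100)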
if __name__ == "__main__":
print(f"""{solution() = }""")
| 192 | 1 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 283 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 283 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
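# Usage sketch: the attribute_map above lets the generic transformer attribute
# names resolve to DistilBERT's own parameter names.
if __name__ == "__main__":
    config = DistilBertConfig(n_layers=2, n_heads=4, dim=128, hidden_dim=512)
    print(config.num_hidden_layers)  # -> 2, aliased to `n_layers`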
| 455 |
UNIVERSAL_GAS_CONSTANT = 8.314_459_8  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square molecular speed v_rms = sqrt(3RT/M), with
    the temperature in kelvin and the molar mass in kg/mol."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: nitrogen (N2) at 300 K; the molar mass must be in kg/mol, i.e. 0.028
    temperature = 300
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 455 | 1 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_dir = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_dir)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
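# Usage sketch: this is how a model test suite typically wires the tester up
# (BertConfig stands in here for any concrete config class).
import unittest

from transformers import BertConfig


class BertConfigTest(unittest.TestCase):
    def setUp(self):
        self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()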
| 702 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class _snake_case ( lowerCAmelCase_ ):
"""simple docstring"""
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __A ( self : str ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : List[str] =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase_ : Any ={0: '''batch'''}
lowerCAmelCase_ : Any ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowerCAmelCase_ : List[Any] ={0: '''batch''', 1: '''decoder_sequence'''}
lowerCAmelCase_ : int ={0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCAmelCase_ : List[str] =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] =self.num_layers
for i in range(UpperCamelCase_ ):
lowerCAmelCase_ : int ={0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCAmelCase_ : List[Any] ={0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowerCAmelCase_ : Optional[Any] =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __A ( self : Union[str, Any] ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : List[str] =super().outputs
else:
lowerCAmelCase_ : Optional[Any] =super(UpperCamelCase_ , self ).outputs
if self.use_past:
lowerCAmelCase_ , lowerCAmelCase_ : Dict =self.num_layers
for i in range(UpperCamelCase_ ):
lowerCAmelCase_ : Optional[Any] ={0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCAmelCase_ : Optional[Any] ={0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __A ( self : int , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ):
lowerCAmelCase_ : Optional[Any] =self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Generate decoder inputs
lowerCAmelCase_ : List[Any] =seq_length if not self.use_past else 1
lowerCAmelCase_ : Dict =self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase_ : Union[str, Any] ={F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
lowerCAmelCase_ : List[Any] =dict(**UpperCamelCase_ , **UpperCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase_ , lowerCAmelCase_ : Dict =common_inputs['''input_ids'''].shape
lowerCAmelCase_ : Tuple =common_inputs['''decoder_input_ids'''].shape[1]
lowerCAmelCase_ , lowerCAmelCase_ : Any =self.num_attention_heads
lowerCAmelCase_ : Optional[int] =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase_ : Optional[int] =decoder_seq_length + 3
lowerCAmelCase_ : List[Any] =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCAmelCase_ : Dict =torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(UpperCamelCase_ , UpperCamelCase_ )] , dim=1 )
lowerCAmelCase_ : int =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCAmelCase_ , lowerCAmelCase_ : Dict =self.num_layers
lowerCAmelCase_ : Union[str, Any] =min(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase_ : Optional[Any] =max(UpperCamelCase_ , UpperCamelCase_ ) - min_num_layers
lowerCAmelCase_ : Union[str, Any] ='''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(UpperCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
) )
# TODO: test this.
lowerCAmelCase_ : List[str] =encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(UpperCamelCase_ , UpperCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) )
return common_inputs
def __A ( self : Optional[Any] , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ):
lowerCAmelCase_ : str =self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] =common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowerCAmelCase_ : int =seqlen + 2
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] =self.num_layers
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] =self.num_attention_heads
lowerCAmelCase_ : Tuple =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase_ : Any =common_inputs['''attention_mask'''].dtype
lowerCAmelCase_ : List[str] =torch.cat(
[common_inputs['''attention_mask'''], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_ )] , dim=1 )
lowerCAmelCase_ : List[str] =[
(torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) for _ in range(UpperCamelCase_ )
]
return common_inputs
def __A ( self : List[Any] , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase_ : Tuple =compute_effective_axis_dimension(
UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase_ : List[Any] =tokenizer.num_special_tokens_to_add(UpperCamelCase_ )
lowerCAmelCase_ : Tuple =compute_effective_axis_dimension(
UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase_ : List[Any] =[''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCAmelCase_ : Any =dict(tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ ) )
return common_inputs
def __A ( self : List[Any] , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : Optional[Any] =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
else:
lowerCAmelCase_ : int =self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
return common_inputs
def __A ( self : Any , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : Optional[Any] =super()._flatten_past_key_values_(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
else:
lowerCAmelCase_ : Dict =super(UpperCamelCase_ , self )._flatten_past_key_values_(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
@property
def __A ( self : Union[str, Any] ):
return 1E-4
| 305 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
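# Quick sanity math on the defaults above: 224 / 16 = 14 patches per side, so
# the encoder sees 14**2 = 196 patch tokens and mask_ratio = 0.75 hides 147.
if __name__ == "__main__":
    config = ViTMAEConfig()
    num_patches = (config.image_size // config.patch_size) ** 2
    print(num_patches, int(num_patches * config.mask_ratio))  # 196 147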
| 635 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 635 | 1 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # subclasses override this with the actual algorithm
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")

        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
__lowerCamelCase : Union[str, Any] = [0]
__lowerCamelCase : List[Any] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCamelCase : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCamelCase : List[str] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCamelCase : Tuple = flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 379 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {'''vocab_file''': '''vocab.txt'''}
__lowerCamelCase : Union[str, Any] = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
__lowerCamelCase : Optional[Any] = {
'''openbmb/cpm-ant-10b''': 1024,
}
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = collections.OrderedDict()
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" ) as reader:
SCREAMING_SNAKE_CASE__ = reader.readlines()
for index, token in enumerate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE__ = token.rstrip("""\n""" )
SCREAMING_SNAKE_CASE__ = index
return vocab
class __snake_case ( lowerCamelCase_ ):
def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : int="<unk>" , _lowercase : int=2_00 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = vocab
SCREAMING_SNAKE_CASE__ = unk_token
SCREAMING_SNAKE_CASE__ = max_input_chars_per_word
def __a ( self : Optional[int] , _lowercase : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = list(_lowercase )
if len(_lowercase ) > self.max_input_chars_per_word:
return [self.unk_token]
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = []
while start < len(_lowercase ):
SCREAMING_SNAKE_CASE__ = len(_lowercase )
SCREAMING_SNAKE_CASE__ = None
while start < end:
SCREAMING_SNAKE_CASE__ = """""".join(chars[start:end] )
if substr in self.vocab:
SCREAMING_SNAKE_CASE__ = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_lowercase )
SCREAMING_SNAKE_CASE__ = end
return sub_tokens
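# The tokenize method above is a greedy longest-match-first subword split: at each
# position it tries the longest remaining substring, shrinking the window until it
# finds a vocabulary hit, and falls back to the unk token for a single character.
# A self-contained sketch with a toy vocabulary (vocab and input word are made up):
def greedy_longest_match(word, vocab, unk="<unk>"):
    """Split `word` into the longest vocabulary entries, scanning left to right."""
    chars, start, pieces = list(word), 0, []
    while start < len(chars):
        end, match = len(chars), None
        while start < end:  # shrink the window until a vocabulary hit
            candidate = "".join(chars[start:end])
            if candidate in vocab:
                match = candidate
                break
            end -= 1
        if match is None:  # no prefix matched: emit <unk> and advance one character
            pieces.append(unk)
            start += 1
        else:
            pieces.append(match)
            start = end
    return pieces

# Greediness picks "unhap" over the better split "un" + "happy":
assert greedy_longest_match("unhappy", {"un", "happy", "unhap"}) == ["unhap", "<unk>", "<unk>"]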
class __snake_case ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ["input_ids", "attention_mask"]
lowerCAmelCase_ = False
def __init__( self : int , _lowercase : str , _lowercase : List[Any]="<d>" , _lowercase : List[Any]="</d>" , _lowercase : Union[str, Any]="<s>" , _lowercase : List[str]="</s>" , _lowercase : str="<pad>" , _lowercase : int="<unk>" , _lowercase : List[str]="</n>" , _lowercase : Tuple="</_>" , _lowercase : Any="left" , **_lowercase : Any , ):
"""simple docstring"""
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=_lowercase , eod_token=_lowercase , bos_token=_lowercase , eos_token=_lowercase , pad_token=_lowercase , unk_token=_lowercase , line_token=_lowercase , space_token=_lowercase , padding_side=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ = bod_token
SCREAMING_SNAKE_CASE__ = eod_token
SCREAMING_SNAKE_CASE__ = load_vocab(_lowercase )
SCREAMING_SNAKE_CASE__ = self.encoder[space_token]
SCREAMING_SNAKE_CASE__ = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
        SCREAMING_SNAKE_CASE__ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE__ = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __a ( self : Optional[Any] ):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __a ( self : List[Any] ):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __a ( self : Any ):
"""simple docstring"""
return self.encoder["\n"]
@property
def __a ( self : Union[str, Any] ):
"""simple docstring"""
return len(self.encoder )
def __a ( self : int ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : Union[str, Any] , _lowercase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = []
for x in jieba.cut(_lowercase , cut_all=_lowercase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_lowercase ) )
return output_tokens
def __a ( self : int , _lowercase : Any , **_lowercase : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [i for i in token_ids if i >= 0]
SCREAMING_SNAKE_CASE__ = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_lowercase , **_lowercase )
def __a ( self : Optional[int] , _lowercase : List[Any] ):
"""simple docstring"""
return token in self.encoder
def __a ( self : List[str] , _lowercase : List[str] ):
"""simple docstring"""
return "".join(_lowercase )
def __a ( self : Optional[int] , _lowercase : Any ):
"""simple docstring"""
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def __a ( self : Tuple , _lowercase : List[Any] ):
"""simple docstring"""
return self.decoder.get(_lowercase , self.unk_token )
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
"""simple docstring"""
if os.path.isdir(_lowercase ):
SCREAMING_SNAKE_CASE__ = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
SCREAMING_SNAKE_CASE__ = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
SCREAMING_SNAKE_CASE__ = 0
if " " in self.encoder:
SCREAMING_SNAKE_CASE__ = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
SCREAMING_SNAKE_CASE__ = self.encoder["""\n"""]
del self.encoder["\n"]
        SCREAMING_SNAKE_CASE__ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
""" Please check that the vocabulary is not corrupted!""" )
SCREAMING_SNAKE_CASE__ = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
    def __a ( self : int , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        """simple docstring"""
        if token_ids_b is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_b
    def __a ( self : Union[str, Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=already_has_special_tokens )
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b ))
        return [1] + ([0] * len(token_ids_a ))
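# The two methods above prepend the bos token before each segment, so for made-up
# segments token_ids_a = [5, 6] and token_ids_b = [7] they produce:
#   input ids : [bos, 5, 6, bos, 7]
#   mask      : [1,   0, 0, 1,   0]   (1 marks a special token)
mask = [1] + [0] * 2 + [1] + [0] * 1
assert mask == [1, 0, 0, 1, 0]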
| 379 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __snake_case ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase_ : torch.FloatTensor
lowerCAmelCase_ : torch.FloatTensor
lowerCAmelCase_ : Optional[torch.FloatTensor] = None
class __snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = 2
@register_to_config
def __init__( self :Tuple , UpperCamelCase__ :float = 0.02 , UpperCamelCase__ :float = 100 , UpperCamelCase__ :float = 1.007 , UpperCamelCase__ :float = 80 , UpperCamelCase__ :float = 0.05 , UpperCamelCase__ :float = 50 , ):
# standard deviation of the initial noise distribution
_a = sigma_max
# setable values
_a = None
_a = None
_a = None # sigma(t_i)
def SCREAMING_SNAKE_CASE_ ( self :Tuple , UpperCamelCase__ :torch.FloatTensor , UpperCamelCase__ :Optional[int] = None ):
return sample
def SCREAMING_SNAKE_CASE_ ( self :Dict , UpperCamelCase__ :int , UpperCamelCase__ :Union[str, torch.device] = None ):
_a = num_inference_steps
_a = np.arange(0 , self.num_inference_steps )[::-1].copy()
_a = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ )
_a = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
        _a = torch.tensor(UpperCamelCase__ , dtype=torch.float32 , device=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :int , UpperCamelCase__ :torch.FloatTensor , UpperCamelCase__ :float , UpperCamelCase__ :Optional[torch.Generator] = None ):
if self.config.s_min <= sigma <= self.config.s_max:
_a = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
_a = 0
# sample eps ~ N(0, S_noise^2 * I)
_a = self.config.s_noise * randn_tensor(sample.shape , generator=UpperCamelCase__ ).to(sample.device )
_a = sigma + gamma * sigma
_a = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def SCREAMING_SNAKE_CASE_ ( self :Any , UpperCamelCase__ :torch.FloatTensor , UpperCamelCase__ :float , UpperCamelCase__ :float , UpperCamelCase__ :torch.FloatTensor , UpperCamelCase__ :bool = True , ):
_a = sample_hat + sigma_hat * model_output
_a = (sample_hat - pred_original_sample) / sigma_hat
_a = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=UpperCamelCase__ , derivative=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , UpperCamelCase__ :torch.FloatTensor , UpperCamelCase__ :float , UpperCamelCase__ :float , UpperCamelCase__ :torch.FloatTensor , UpperCamelCase__ :torch.FloatTensor , UpperCamelCase__ :torch.FloatTensor , UpperCamelCase__ :bool = True , ):
_a = sample_prev + sigma_prev * model_output
_a = (sample_prev - pred_original_sample) / sigma_prev
_a = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=UpperCamelCase__ , derivative=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :Dict , UpperCamelCase__ :Tuple , UpperCamelCase__ :Dict , UpperCamelCase__ :Optional[Any] ):
raise NotImplementedError()
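# set_timesteps above stores squared sigmas interpolated geometrically between
# sigma_max**2 (step i = 0) and sigma_min**2 (step i = N - 1). A small numeric check
# using the config defaults registered above and a made-up step count N:
import numpy as np

sigma_max, sigma_min, N = 100.0, 0.02, 5
timesteps = np.arange(0, N)[::-1]  # [4, 3, 2, 1, 0], as built in set_timesteps
schedule = [sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1)) for i in timesteps]
assert abs(schedule[0] - sigma_min**2) < 1e-9   # i = 4 -> sigma_min**2
assert abs(schedule[-1] - sigma_max**2) < 1e-6  # i = 0 -> sigma_max**2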
| 388 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class __snake_case ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self :List[str] , UpperCamelCase__ :Dict=0.01 , UpperCamelCase__ :Union[str, Any]=1_000 ):
_a = p_stop
_a = max_length
def __iter__( self :Dict ):
_a = 0
_a = False
while not stop and count < self.max_length:
yield count
count += 1
_a = random.random() < self.p_stop
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self :Tuple , UpperCamelCase__ :Tuple , UpperCamelCase__ :Optional[Any] , UpperCamelCase__ :str=False , UpperCamelCase__ :int=True ):
_a = [
BatchSamplerShard(UpperCamelCase__ , 2 , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ )
for i in range(2 )
]
_a = [list(UpperCamelCase__ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(UpperCamelCase__ ) for shard in batch_sampler_shards] , [len(UpperCamelCase__ ) for e in expected] )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
# Check the shards when the dataset is a round multiple of total batch size.
_a = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ )
_a = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_a = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ )
_a = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_a = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ )
_a = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
# num_processes batch.
_a = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ )
_a = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ )
# Check the shards when the dataset is very small.
_a = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ )
_a = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [[], []]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
# Check the shards when the dataset is a round multiple of batch size.
_a = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ )
_a = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ )
# Check the shards when the dataset is not a round multiple of batch size.
_a = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ )
_a = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_a = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ )
_a = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ )
# Check the shards when the dataset is very small.
_a = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase__ )
_a = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ )
_a = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase__ )
_a = [[], []]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
# Check the shards when the dataset is a round multiple of total batch size.
_a = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ )
_a = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_a = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ )
_a = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_a = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ )
_a = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
# num_processes batch.
_a = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ )
_a = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ )
# Check the shards when the dataset is very small.
_a = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [[[0, 1]], []]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ )
_a = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase__ )
_a = [[], []]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
# Check the shards when the dataset is a round multiple of batch size.
_a = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ )
_a = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ )
# Check the shards when the dataset is not a round multiple of batch size.
_a = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ )
_a = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_a = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ )
_a = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase__ )
_a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ )
# Check the shards when the dataset is very small.
_a = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase__ )
_a = [[[0, 1]], []]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ )
_a = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase__ )
_a = [[], []]
self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :int ):
_a = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_a = [BatchSamplerShard(UpperCamelCase__ , 2 , UpperCamelCase__ , even_batches=UpperCamelCase__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def SCREAMING_SNAKE_CASE_ ( self :List[str] , UpperCamelCase__ :int , UpperCamelCase__ :Optional[Any] , UpperCamelCase__ :Any , UpperCamelCase__ :str=False , UpperCamelCase__ :Optional[Any]=2 , UpperCamelCase__ :int=False ):
random.seed(UpperCamelCase__ )
_a = list(UpperCamelCase__ )
_a = [
IterableDatasetShard(
UpperCamelCase__ , batch_size=UpperCamelCase__ , drop_last=UpperCamelCase__ , num_processes=UpperCamelCase__ , process_index=UpperCamelCase__ , split_batches=UpperCamelCase__ , )
for i in range(UpperCamelCase__ )
]
_a = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(UpperCamelCase__ )
iterable_dataset_lists.append(list(UpperCamelCase__ ) )
_a = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_a = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
self.assertTrue(len(UpperCamelCase__ ) % shard_batch_size == 0 )
_a = []
for idx in range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(UpperCamelCase__ ) < len(UpperCamelCase__ ):
reference += reference
self.assertListEqual(UpperCamelCase__ , reference[: len(UpperCamelCase__ )] )
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
_a = 42
_a = RandomIterableDataset()
self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__ )
self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__ )
self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__ )
self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__ )
# Edge case with a very small dataset
_a = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__ )
self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__ )
self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__ )
self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
_a = BatchSampler(range(16 ) , batch_size=4 , drop_last=UpperCamelCase__ )
_a = SkipBatchSampler(UpperCamelCase__ , 2 )
self.assertListEqual(list(UpperCamelCase__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
_a = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
_a = DataLoader(list(range(16 ) ) , batch_size=4 )
_a = skip_first_batches(UpperCamelCase__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
_a = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(UpperCamelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(UpperCamelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
Accelerator()
_a = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(UpperCamelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(UpperCamelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
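# The expected lists in the tests above encode the sharding rule: with 2 processes and
# split_batches=False, process p receives batches p, p + 2, p + 4, ... A simplified
# sketch of that round-robin assignment (it ignores the padding / even_batches logic
# the real BatchSamplerShard adds for uneven datasets):
def shard_batches(batches, num_processes, process_index):
    """Round-robin: process p gets every num_processes-th batch, starting at p."""
    return [b for i, b in enumerate(batches) if i % num_processes == process_index]

batches = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
           [12, 13, 14], [15, 16, 17], [18, 19, 20], [21, 22, 23]]
assert shard_batches(batches, 2, 0) == [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
assert shard_batches(batches, 2, 1) == [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]]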
| 388 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : List[Any] = logging.get_logger(__name__)
snake_case : Any = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Optional[int] = '''time_series_transformer'''
UpperCAmelCase__ : Optional[int] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self :Any ,__snake_case :Optional[int] = None ,__snake_case :Optional[int] = None ,__snake_case :str = "student_t" ,__snake_case :str = "nll" ,__snake_case :int = 1 ,__snake_case :List[int] = [1, 2, 3, 4, 5, 6, 7] ,__snake_case :Optional[Union[str, bool]] = "mean" ,__snake_case :int = 0 ,__snake_case :int = 0 ,__snake_case :int = 0 ,__snake_case :int = 0 ,__snake_case :Optional[List[int]] = None ,__snake_case :Optional[List[int]] = None ,__snake_case :int = 32 ,__snake_case :int = 32 ,__snake_case :int = 2 ,__snake_case :int = 2 ,__snake_case :int = 2 ,__snake_case :int = 2 ,__snake_case :bool = True ,__snake_case :str = "gelu" ,__snake_case :int = 64 ,__snake_case :float = 0.1 ,__snake_case :float = 0.1 ,__snake_case :float = 0.1 ,__snake_case :float = 0.1 ,__snake_case :float = 0.1 ,__snake_case :int = 1_00 ,__snake_case :float = 0.02 ,__snake_case :Optional[int]=True ,**__snake_case :Optional[Any] ,) -> str:
# time series specific configuration
a__ = prediction_length
a__ = context_length or prediction_length
a__ = distribution_output
a__ = loss
a__ = input_size
a__ = num_time_features
a__ = lags_sequence
a__ = scaling
a__ = num_dynamic_real_features
a__ = num_static_real_features
a__ = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__snake_case ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
a__ = cardinality
else:
a__ = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__snake_case ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
a__ = embedding_dimension
else:
a__ = [min(50 ,(cat + 1) // 2 ) for cat in self.cardinality]
a__ = num_parallel_samples
# Transformer architecture configuration
a__ = input_size * len(__snake_case ) + self._number_of_features
a__ = d_model
a__ = encoder_attention_heads
a__ = decoder_attention_heads
a__ = encoder_ffn_dim
a__ = decoder_ffn_dim
a__ = encoder_layers
a__ = decoder_layers
a__ = dropout
a__ = attention_dropout
a__ = activation_dropout
a__ = encoder_layerdrop
a__ = decoder_layerdrop
a__ = activation_function
a__ = init_std
a__ = use_cache
super().__init__(is_encoder_decoder=__snake_case ,**__snake_case )
@property
def lowerCamelCase__( self :Dict ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
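# A numeric check of the feature-size arithmetic above, using the constructor defaults
# (input_size = 1, seven lags, all extra feature counts 0, cardinality [0] so the
# embedding dimension list is [min(50, (0 + 1) // 2)] = [0]):
input_size = 1
lags_sequence = [1, 2, 3, 4, 5, 6, 7]
embedding_dimension = [0]
num_dynamic_real_features = num_time_features = num_static_real_features = 0
number_of_features = (
    sum(embedding_dimension)
    + num_dynamic_real_features
    + num_time_features
    + num_static_real_features
    + input_size * 2  # the log1p(abs(loc)) and log(scale) features
)
d_model_input = input_size * len(lags_sequence) + number_of_features
assert d_model_input == 9  # the feature_size fed into the encoder/decoder projections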
| 716 |
import unittest
from knapsack import greedy_knapsack as kp
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
a__ = [10, 20, 30, 40, 50, 60]
a__ = [2, 4, 6, 8, 10, 12]
a__ = 1_00
self.assertEqual(kp.calc_profit(__snake_case ,__snake_case ,__snake_case ) ,2_10 )
def lowerCamelCase__( self :str ) -> Optional[int]:
        self.assertRaisesRegex(ValueError ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :Optional[Any] ) -> int:
        self.assertRaisesRegex(ValueError ,'Weight can not be negative.' )
def lowerCamelCase__( self :str ) -> List[str]:
        self.assertRaisesRegex(ValueError ,'Profit can not be negative.' )
def lowerCamelCase__( self :str ) -> Optional[Any]:
        self.assertRaisesRegex(ValueError ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :int ) -> List[Any]:
        self.assertRaisesRegex(
            IndexError ,'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
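# kp.calc_profit above implements a greedy (fractional) knapsack: items are taken by
# descending profit/weight ratio. A hedged reimplementation of the same idea -- the
# real kp.calc_profit may differ in validation details -- that reproduces the 210
# expected by the first test; here every item fits (total weight 2 + 4 + ... + 12 = 42
# <= 100), so the result is simply the summed profits:
def greedy_fractional_knapsack(profit, weight, max_weight):
    """Take items by descending profit/weight ratio, splitting the last item if needed."""
    order = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
    total, remaining = 0.0, max_weight
    for i in order:
        if weight[i] <= remaining:
            total += profit[i]
            remaining -= weight[i]
        else:
            total += profit[i] * remaining / weight[i]  # fractional share of the item
            break
    return total

assert greedy_fractional_knapsack([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210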
| 657 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : str = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
__lowerCamelCase : Dict = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
sd_pipe.set_scheduler('sample_euler' )
__lowerCamelCase : int = 'A painting of a squirrel eating a burger'
__lowerCamelCase : Tuple = torch.manual_seed(0 )
__lowerCamelCase : Tuple = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
__lowerCamelCase : Union[str, Any] = output.images
__lowerCamelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase : List[str] = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : List[Any] = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
__lowerCamelCase : List[Any] = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
sd_pipe.set_scheduler('sample_euler' )
__lowerCamelCase : Union[str, Any] = 'A painting of a squirrel eating a burger'
__lowerCamelCase : Any = torch.manual_seed(0 )
__lowerCamelCase : List[str] = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
__lowerCamelCase : List[str] = output.images
__lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase : Tuple = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def lowercase_ ( self ) -> List[Any]:
__lowerCamelCase : List[Any] = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
__lowerCamelCase : Dict = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
__lowerCamelCase : Any = 'A painting of a squirrel eating a burger'
__lowerCamelCase : Optional[Any] = torch.manual_seed(0 )
__lowerCamelCase : Optional[int] = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : Optional[Any] = output.images
__lowerCamelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase : List[Any] = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
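# Condensed usage pattern shown by the tests above (requires a CUDA GPU and the
# k-diffusion package; model id and prompt are taken from the last test):
import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base').to('cuda')
pipe.set_scheduler('sample_dpmpp_2m')  # select any k-diffusion sampler by name
image = pipe(
    'A painting of a squirrel eating a burger',
    generator=torch.manual_seed(0),  # fixed seed for reproducible output
    guidance_scale=7.5,
    num_inference_steps=15,
    use_karras_sigmas=True,  # Karras noise schedule, as exercised in the last test
).images[0]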
| 13 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__a :str = logging.get_logger(__name__)
__a :Any = Dict[str, Any]
__a :int = List[Prediction]
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ):
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , "vision" )
self.check_model_type(
            dict(list(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() ) + list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) )
def __A ( self : str , **UpperCAmelCase : str ):
A_ = {}
if "threshold" in kwargs:
A_ = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[Any] ):
return super().__call__(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : str , UpperCAmelCase : Any ):
A_ = load_image(UpperCAmelCase )
A_ = torch.IntTensor([[image.height, image.width]] )
A_ = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
A_ = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
A_ = target_size
return inputs
def __A ( self : Optional[Any] , UpperCAmelCase : Optional[int] ):
A_ = model_inputs.pop("target_size" )
A_ = self.model(**UpperCAmelCase )
A_ = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
A_ = model_inputs["bbox"]
return model_outputs
def __A ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any]=0.9 ):
A_ = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
A_ , A_ = target_size[0].tolist()
def unnormalize(UpperCAmelCase : Any ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
A_ , A_ = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
A_ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
A_ = [unnormalize(UpperCAmelCase ) for bbox in model_outputs["bbox"].squeeze(0 )]
A_ = ["score", "label", "box"]
A_ = [dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for vals in zip(scores.tolist() , UpperCAmelCase , UpperCAmelCase ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
A_ = self.image_processor.post_process_object_detection(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
A_ = raw_annotations[0]
A_ = raw_annotation["scores"]
A_ = raw_annotation["labels"]
A_ = raw_annotation["boxes"]
A_ = scores.tolist()
A_ = [self.model.config.idalabel[label.item()] for label in labels]
A_ = [self._get_bounding_box(UpperCAmelCase ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
A_ = ["score", "label", "box"]
A_ = [
dict(zip(UpperCAmelCase , UpperCAmelCase ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def __A ( self : Tuple , UpperCAmelCase : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
A_ , A_ , A_ , A_ = box.int().tolist()
A_ = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
        return bbox
| 86 | 0 |
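# Usage sketch for the ObjectDetectionPipeline defined in the row above: it returns a
# list of {"score", "label", "box"} dicts, with the box in the xmin/ymin/xmax/ymax form
# built by _get_bounding_box. The model id and the sample values in the comment are
# illustrative only:
from transformers import pipeline

detector = pipeline('object-detection', model='facebook/detr-resnet-50')
predictions = detector('http://images.cocodataset.org/val2017/000000039769.jpg')
# e.g. {'score': 0.99, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}
for pred in predictions:
    print(pred['label'], pred['score'], pred['box'])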
from pathlib import Path
import fire
def UpperCamelCase__ ( A__ , A__ , A__ ) -> Tuple:
snake_case__ : List[str] = Path(lowerCamelCase_ )
snake_case__ : Any = Path(lowerCamelCase_ )
dest_dir.mkdir(exist_ok=lowerCamelCase_ )
for path in src_dir.iterdir():
snake_case__ : List[str] = [x.rstrip() for x in list(path.open().readlines() )][:n]
snake_case__ : Optional[Any] = dest_dir.joinpath(path.name )
print(lowerCamelCase_ )
dest_path.open('w' ).write('\n'.join(lowerCamelCase_ ) )
if __name__ == "__main__":
fire.Fire(minify)
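# fire.Fire exposes the function's positional arguments on the command line. Assuming
# this file is saved as minify.py (hypothetical paths), keeping the first 100 lines of
# every file in ./data would look like:
#   python minify.py ./data ./data_mini 100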
| 706 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCAmelCase__ : Optional[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def UpperCamelCase__ ( A__ , A__ , A__ ) -> List[str]:
snake_case__ : int = state_dict.pop(A__ )
snake_case__ : Union[str, Any] = val
def UpperCamelCase__ ( A__ ) -> int:
snake_case__ : List[Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
snake_case__ : Any = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
snake_case__ : Optional[int] = value
else:
snake_case__ : Optional[int] = value
return new_state_dict
def UpperCamelCase__ ( A__ , A__=False ) -> Optional[int]:
snake_case__ : Optional[int] = ''
if is_panoptic:
snake_case__ : Tuple = 'conditional_detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case__ : int = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
snake_case__ : str = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Union[str, Any] = in_proj_weight[:256, :]
snake_case__ : Union[str, Any] = in_proj_bias[:256]
snake_case__ : Union[str, Any] = in_proj_weight[256:512, :]
snake_case__ : Optional[Any] = in_proj_bias[256:512]
snake_case__ : List[str] = in_proj_weight[-256:, :]
snake_case__ : Tuple = in_proj_bias[-256:]
def UpperCamelCase__ ( ) -> Tuple:
snake_case__ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case__ : str = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase__ ( A__ , A__ ) -> str:
snake_case__ : List[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case__ : Any = 'resnet101'
if "dc5" in model_name:
snake_case__ : Any = True
snake_case__ : int = 'panoptic' in model_name
if is_panoptic:
snake_case__ : str = 250
else:
snake_case__ : Union[str, Any] = 91
snake_case__ : Optional[int] = 'huggingface/label-files'
snake_case__ : Optional[Any] = 'coco-detection-id2label.json'
snake_case__ : str = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) )
snake_case__ : List[Any] = {int(A__ ): v for k, v in idalabel.items()}
snake_case__ : Any = idalabel
snake_case__ : int = {v: k for k, v in idalabel.items()}
# load image processor
snake_case__ : List[Any] = 'coco_panoptic' if is_panoptic else 'coco_detection'
snake_case__ : List[Any] = ConditionalDetrImageProcessor(format=A__ )
# prepare image
snake_case__ : List[str] = prepare_img()
snake_case__ : Any = image_processor(images=A__ , return_tensors='pt' )
snake_case__ : Dict = encoding['pixel_values']
logger.info(F"""Converting model {model_name}...""" )
# load original model from torch hub
snake_case__ : Any = torch.hub.load('DeppMeng/ConditionalDETR' , A__ , pretrained=A__ ).eval()
snake_case__ : Tuple = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case__ : List[Any] = 'conditional_detr.' + src
rename_key(A__ , A__ , A__ )
snake_case__ : Dict = rename_backbone_keys(A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ , is_panoptic=A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case__ : Optional[int] = 'conditional_detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('conditional_detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
snake_case__ : List[Any] = state_dict.pop(A__ )
snake_case__ : Optional[int] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case__ : str = state_dict.pop(A__ )
snake_case__ : List[Any] = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
snake_case__ : Union[str, Any] = state_dict.pop(A__ )
snake_case__ : Dict = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
snake_case__ : List[Any] = state_dict.pop(A__ )
snake_case__ : Optional[int] = val
# finally, create HuggingFace model and load state dict
snake_case__ : Union[str, Any] = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
model.push_to_hub(repo_id=A__ , organization='DepuMeng' , commit_message='Add model' )
# verify our conversion
snake_case__ : Tuple = conditional_detr(A__ )
snake_case__ : str = model(A__ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
lowerCAmelCase__ : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
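# read_in_q_k_v above slices PyTorch's fused attention projection (3 * d_model rows,
# d_model = 256 here) into separate query/key/value weights. A minimal sketch of the
# same slicing on random tensors:
import torch

d_model = 256
in_proj_weight = torch.randn(3 * d_model, d_model)  # fused q/k/v as stored by nn.MultiheadAttention
in_proj_bias = torch.randn(3 * d_model)
q_w, k_w, v_w = in_proj_weight[:d_model], in_proj_weight[d_model : 2 * d_model], in_proj_weight[-d_model:]
q_b, k_b, v_b = in_proj_bias[:d_model], in_proj_bias[d_model : 2 * d_model], in_proj_bias[-d_model:]
assert q_w.shape == k_w.shape == v_w.shape == (d_model, d_model)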
| 699 | 0 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
a_ :Union[str, Any] = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
    env_name = 'hopper-medium-v2'
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        'bglick13/hopper-medium-v2-value-function-hor32',
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
                F''' {total_score}'''
            )
            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 35 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase ):
    mod_file = inspect.getfile(accelerate.test_utils )
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
    base_cmd = ['''accelerate''', '''launch''']
    config_folder = Path.home() / '''.cache/huggingface/accelerate'''
    config_file = '''default_config.yaml'''
    config_path = config_folder / config_file
    changed_path = config_folder / '''_default_config.yaml'''
    test_config_path = Path('''tests/test_configs''' )
@classmethod
    def setUpClass( cls ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
    def tearDownClass( cls ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
    def test_no_config(self ):
        cmd = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
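        # On a CPU-only or single-GPU machine this runs `accelerate launch <test_cli.py>`;
        # when more than one CUDA device is visible, the `--multi_gpu` flag is inserted first.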
    def test_config_compatibility(self ):
        for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
            with self.subTest(config_file=config ):
                execute_subprocess_async(
                    self.base_cmd + ['''--config_file''', str(config ), self.test_file_path] , env=os.environ.copy() )
    def test_accelerate_test(self ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class TpuConfigTester(unittest.TestCase ):
    tpu_name = '''test-tpu'''
    tpu_zone = '''us-central1-a'''
    command = '''ls'''
    cmd = ['''accelerate''', '''tpu-config''']
    base_output = '''cd /usr/share'''
    command_file = '''tests/test_samples/test_command_file.sh'''
    gcloud = '''Running gcloud compute tpus tpu-vm ssh'''
    def test_base(self ):
        output = run_command(
            self.cmd
            + ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=True , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , output , )
    def test_base_backward_compatibility(self ):
        output = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
            ] , return_stdout=True , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
    def test_with_config_file(self ):
        output = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=True )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , output , )
    def test_with_config_file_and_command(self ):
        output = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=True , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , output , )
    def test_with_config_file_and_multiple_command(self ):
        output = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
            ] , return_stdout=True , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _lowercase , )
    def test_with_config_file_and_command_file(self ):
        output = run_command(
            self.cmd
            + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=True , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , output , )
    def test_with_config_file_and_command_file_backward_compatibility(self ):
        output = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
            ] , return_stdout=True , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
    def test_accelerate_install(self ):
        output = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=True , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , output , )
    def test_accelerate_install_version(self ):
        output = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
            ] , return_stdout=True , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
| 35 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
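# How the lazy module works (a sketch of the mechanism, not new behavior):
# `_LazyModule` registers every name listed in `_import_structure` on the
# package but only executes the underlying `from .modeling_convnext import ...`
# the first time one of those attributes is accessed, keeping
# `import transformers` cheap when the torch/TF/vision extras are absent.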
| 663 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self ):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = TransfoXLConfig(
            vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_1, input_ids_2, lm_labels)
    def set_seed(self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
    def create_and_check_transfo_xl_model(self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLModel(config )
        hidden_states_1, mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {"""input_ids""": input_ids_2, """mems""": mems_1}
        hidden_states_2, mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(hidden_states_2.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_lm_head(self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLLMHeadModel(config )
        lm_logits_1, mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {"""input_ids""": input_ids_1, """labels""": lm_labels}
        _, mems_1 = model(inputs ).to_tuple()
        lm_logits_2, mems_2 = model([input_ids_2, mems_1] ).to_tuple()
        inputs = {"""input_ids""": input_ids_1, """mems""": mems_1, """labels""": lm_labels}
        _, mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_2.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_for_sequence_classification(self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLForSequenceClassification(config )
        result = model(input_ids_1 )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
    def setUp(self ):
        self.model_tester = TFTransfoXLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )

    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self ):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )

    def test_transfo_xl_lm_head(self ):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )

    def test_transfo_xl_sequence_classification_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )
    def test_model_common_attributes(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode(self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
    def test_model_from_pretrained(self ):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
| 663 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mobilebert_fast'] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilebert'] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mobilebert'] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 406 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class lowerCamelCase (ctypes.Structure ):
'''simple docstring'''
_snake_case : str = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)]
def hide_cursor():
    '''simple docstring'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25l' )
        sys.stdout.flush()


def show_cursor():
    '''simple docstring'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25h' )
        sys.stdout.flush()
@contextmanager
def lowercase__ ( ):
'''simple docstring'''
try:
hide_cursor()
yield
finally:
show_cursor()
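# Minimal usage sketch (assumed caller, not part of the original file): wrap a
# long-running console display in the context manager above (still named
# `lowercase__` here) so the cursor is hidden while it runs and restored
# afterwards, even if the wrapped block raises:
#
#   with lowercase__():
#       draw_progress_bar()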
| 406 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase (__UpperCamelCase : list[float] ):
"""simple docstring"""
if len(__UpperCamelCase ) < 2:
raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
if any(i <= 0 for i in nums ):
raise ValueError('''All values must be greater than 0''' )
__UpperCamelCase =nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
"""simple docstring"""
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , attention_window=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
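        # Worked example with this tester's defaults (derived from the formula
        # above, not an extra assertion): seq_length = 7 and attention_window = 4
        # give 7 + (4 - 7 % 4) % 4 = 7 + 1 = 8, so the encoder sequence is padded
        # by one position up to the next multiple of the attention window.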
    def prepare_config_and_inputs_for_common(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        inputs_dict = prepare_led_inputs_dict(config , input_ids , decoder_input_ids )
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids )[:, :-1], tf.ones_like(input_ids )[:, -1:]] , axis=-1 , )
        inputs_dict['global_attention_mask'] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self , config , inputs_dict ):
        model = TFLEDModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3 )
def prepare_led_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
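# Worked example of the default masking above (illustrative token values): with
# config.pad_token_id = 1 and decoder_input_ids = [[2, 5, 1, 1]], the default
# decoder_attention_mask comes out as [[1, 1, 0, 0]] -- position 0 is always
# attended to and the trailing pad tokens are masked out.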
@require_tf
class TFLEDModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFLEDForConditionalGeneration,
            '''feature-extraction''': TFLEDModel,
            '''summarization''': TFLEDForConditionalGeneration,
            '''text2text-generation''': TFLEDForConditionalGeneration,
            '''translation''': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = TFLEDModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LEDConfig )
    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =tf.zeros_like(inputs_dict['''attention_mask'''] )
__UpperCamelCase =2
__UpperCamelCase =tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
__UpperCamelCase =True
__UpperCamelCase =self.model_tester.seq_length
__UpperCamelCase =self.model_tester.encoder_seq_length
def check_decoder_attentions_output(UpperCamelCase__ : Tuple ):
__UpperCamelCase =outputs.decoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(UpperCamelCase__ : Dict ):
__UpperCamelCase =[t.numpy() for t in outputs.encoder_attentions]
__UpperCamelCase =[t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =model_class(UpperCamelCase__ )
__UpperCamelCase =model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
__UpperCamelCase =len(UpperCamelCase__ )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
if self.is_encoder_decoder:
__UpperCamelCase =model_class(UpperCamelCase__ )
__UpperCamelCase =model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_decoder_attentions_output(UpperCamelCase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__UpperCamelCase =True
__UpperCamelCase =model_class(UpperCamelCase__ )
__UpperCamelCase =model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
# Check attention is always last and order is fine
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =model_class(UpperCamelCase__ )
__UpperCamelCase =model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) )
self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
    def test_saved_model_creation(self ):
'''simple docstring'''
pass
    def test_generate_with_headmasking(self ):
'''simple docstring'''
pass
def _long_tensor(tok_lst ):
    """simple docstring"""
    return tf.constant(tok_lst , dtype=tf.int32 )
TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
    def test_inference_no_head(self ):
        '''simple docstring'''
        model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3 )
    def test_inference_with_head(self ):
        '''simple docstring'''
        model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3 , rtol=1E-3 )
| 296 | 0 |
def get_1s_count( number : int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ) or number < 0:
        raise ValueError("Input must be a non-negative integer" )
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
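# Worked example (a sketch, not a doctest from the original file): 13 is 0b1101,
# so the loop clears the lowest set bit three times --
# 0b1101 -> 0b1100 -> 0b1000 -> 0b0000 -- and get_1s_count(13) returns 3.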
if __name__ == "__main__":
import doctest
doctest.testmod()
| 124 |
'''simple docstring'''
import numpy as np
def runge_kutta(f , y0 , x0 , x_end , h ):
    '''simple docstring'''
    n = int(np.ceil((x_end - x0) / h ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        k1 = f(x , y[k] )
        k2 = f(x + 0.5 * h , y[k] + 0.5 * h * k1 )
        k3 = f(x + 0.5 * h , y[k] + 0.5 * h * k2 )
        k4 = f(x + h , y[k] + h * k3 )
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
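# Minimal usage sketch (assumed example, not part of the original file):
# integrate y' = y from x = 0 to x = 1 with y(0) = 1 and step 0.01; the last
# entry of the returned array approximates e ~ 2.71828.
#
#   y = runge_kutta(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)
#   print(y[-1])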
if __name__ == "__main__":
import doctest
doctest.testmod()
| 135 | 0 |
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def get_week_day(year: int , month: int , day: int ) -> str:
"""simple docstring"""
assert len(str(__SCREAMING_SNAKE_CASE ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
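# Worked example (a sketch walking through the code above): get_week_day(2023, 1, 1)
# gives century = 20, century_anchor = (5 * 0 + 2) % 7 = 2, centurian = 23,
# centurian_m = 11 and dooms_day = (1 + 11 + 2 + 2) % 7 = 2, i.e. the 2023
# doomsday falls on a Tuesday; January of a non-leap year anchors on the 3rd, so
# week_day = (2 + 1 - 3) % 7 = 0 and the function returns 'Sunday', which is
# indeed what 2023-01-01 was.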
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 716 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : int =logging.get_logger(__name__)
A__ : Union[str, Any] ={
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class DPRConfig(PretrainedConfig ):
    model_type = '''dpr'''

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim: int = 0 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
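    # Minimal usage sketch (illustrative, not from the original file): the
    # default projection_dim of 0 means no projection layer is added on top of
    # the encoder's hidden size.
    #
    #   config = DPRConfig(projection_dim=0)
    #   assert config.projection_dim == 0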
| 499 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig ):
    model_type = '''dpt'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=384 , patch_size=16 , num_channels=3 , is_hybrid=False , qkv_bias=True , backbone_out_indices=[2, 5, 8, 11] , readout_type="project" , reassemble_factors=[4, 2, 1, 0.5] , neck_hidden_sizes=[96, 192, 384, 768] , fusion_hidden_size=256 , head_in_index=-1 , use_batch_norm_in_fusion_residual=False , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , semantic_loss_ignore_index=255 , semantic_classifier_dropout=0.1 , backbone_featmap_shape=[1, 1024, 24, 24] , neck_ignore_stages=[0, 1] , backbone_config=None , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info("""Initializing the config with a `BiT` backbone.""" )
                backbone_config = {
                    """global_padding""": """same""",
                    """layer_type""": """bottleneck""",
                    """depths""": [3, 4, 9],
                    """out_features""": ["""stage1""", """stage2""", """stage3"""],
                    """embedding_dynamic_padding""": True,
                }
                self.backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info("""Initializing the config with a `BiT` backbone.""" )
                self.backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
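    # Minimal usage sketch (illustrative): the non-hybrid path above leaves the
    # backbone settings empty, so a plain ViT-backed config round-trips through
    # `to_dict()` with `backbone_config` still None.
    #
    #   config = DPTConfig(is_hybrid=False)
    #   assert config.to_dict()["backbone_config"] is None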
| 17 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}
def extract_user_profile(script ) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find("{\"config\"" ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def __init__( self , username ):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json( self ):
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Union[str, Any] ):
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : str ):
return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username( self ) -> str:
        return self.user_data["username"]

    @property
    def fullname( self ) -> str:
        return self.user_data["full_name"]

    @property
    def biography( self ) -> str:
        return self.user_data["biography"]

    @property
    def email( self ) -> str:
        return self.user_data["business_email"]

    @property
    def website( self ) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers( self ) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings( self ) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts( self ) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url( self ) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified( self ) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private( self ) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github" ):
    import os

    if os.environ.get("CI" ):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , dict )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('github')
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
    print(f'{instagram_user.is_private = }')
| 16 | 0 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
'''simple docstring'''
    def __init__( self , parent , vocab_size=100 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , out_indices=[0, 1, 2, 3] , ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
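        # Worked example with the defaults above (derived arithmetic, not an
        # extra check): image_size = 30 and patch_size = 2 give
        # (30 // 2) ** 2 = 225 patches, so seq_length = 226 once the [CLS]
        # token is counted.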
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self ):
        return BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , out_indices=self.out_indices , )
    def create_and_check_model(self , config , pixel_values , labels , pixel_labels ):
        model = BeitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm(self , config , pixel_values , labels , pixel_labels ):
        model = BeitForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )

    def create_and_check_for_image_classification(self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def create_and_check_for_semantic_segmentation(self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self ):
        self.model_tester = BeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
    def test_inputs_embeds(self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
    def test_multi_gpu_data_parallel_forward(self ):
pass
    def test_model_common_attributes(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def _snake_case ( self : Tuple ) -> List[str]:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _snake_case ( self : Optional[Any] ) -> Tuple:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def _snake_case ( self : Any ) -> Optional[int]:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def _snake_case ( self : Any ) -> Tuple:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
def _snake_case ( self : Union[str, Any] ) -> Any:
if not self.model_tester.is_training:
return
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]:
continue
_lowerCamelCase = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
_lowerCamelCase = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
_lowerCamelCase = model(**snake_case__ ).loss
loss.backward()
def _snake_case ( self : int ) -> List[Any]:
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_lowerCamelCase = False
_lowerCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_lowerCamelCase = model_class(snake_case__ )
model.gradient_checkpointing_enable()
model.to(snake_case__ )
model.train()
_lowerCamelCase = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
_lowerCamelCase = model(**snake_case__ ).loss
loss.backward()
def _snake_case ( self : List[str] ) -> Optional[Any]:
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = _config_zero_init(snake_case__ )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(config=snake_case__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def _snake_case ( self : List[str] ) -> Any:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = BeitModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def lowerCamelCase ( ) -> str:
_lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : Optional[int] ) -> Dict:
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def _snake_case ( self : str ) -> List[str]:
_lowerCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(snake_case__ )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=snake_case__ , return_tensors='pt' ).pixel_values.to(snake_case__ )
# prepare bool_masked_pos
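        # 196 = (224 / 16) ** 2 patch positions for a 224x224 image with 16x16 patches;
        # an all-ones mask asks the model to reconstruct every patch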
_lowerCamelCase = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ )
_lowerCamelCase = outputs.logits
# verify the logits
_lowerCamelCase = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , snake_case__ )
_lowerCamelCase = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) )
@slow
def _snake_case ( self : Union[str, Any] ) -> Dict:
_lowerCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(snake_case__ )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(**snake_case__ )
_lowerCamelCase = outputs.logits
# verify the logits
_lowerCamelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , snake_case__ )
_lowerCamelCase = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
_lowerCamelCase = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def _snake_case ( self : Any ) -> Optional[Any]:
_lowerCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
snake_case__ )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(**snake_case__ )
_lowerCamelCase = outputs.logits
# verify the logits
_lowerCamelCase = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , snake_case__ )
_lowerCamelCase = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
_lowerCamelCase = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def _snake_case ( self : Any ) -> Union[str, Any]:
_lowerCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
_lowerCamelCase = model.to(snake_case__ )
_lowerCamelCase = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ )
_lowerCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
_lowerCamelCase = Image.open(ds[0]['file'] )
_lowerCamelCase = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(**snake_case__ )
_lowerCamelCase = outputs.logits
# verify the logits
_lowerCamelCase = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , snake_case__ )
_lowerCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
_lowerCamelCase = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=snake_case__ , )
else:
_lowerCamelCase = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=snake_case__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) )
@slow
def _snake_case ( self : List[Any] ) -> Any:
_lowerCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
_lowerCamelCase = model.to(snake_case__ )
_lowerCamelCase = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ )
_lowerCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
_lowerCamelCase = Image.open(ds[0]['file'] )
_lowerCamelCase = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(**snake_case__ )
_lowerCamelCase = outputs.logits.detach().cpu()
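        # with `target_sizes`, each map is rescaled to the requested (height, width);
        # without it, the model's native 160x160 resolution is kept (both checked below)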
_lowerCamelCase = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] )
_lowerCamelCase = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , snake_case__ )
_lowerCamelCase = image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
_lowerCamelCase = torch.Size((1_6_0, 1_6_0) )
        self.assertEqual(segmentation[0].shape , snake_case__ )
| 234 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , snake_case__ : Optional[Any] , snake_case__ : str=1_3 , snake_case__ : List[Any]=3 , snake_case__ : Union[str, Any]=2_2_4 , snake_case__ : Optional[int]=3_0 , snake_case__ : Tuple=4_0_0 , snake_case__ : Dict=True , snake_case__ : str=None , snake_case__ : Tuple=True , snake_case__ : Optional[int]=[0.5, 0.5, 0.5] , snake_case__ : Dict=[0.5, 0.5, 0.5] , ) -> List[Any]:
_lowerCamelCase = size if size is not None else {'height': 1_8, 'width': 1_8}
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = image_size
_lowerCamelCase = min_resolution
_lowerCamelCase = max_resolution
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = do_normalize
_lowerCamelCase = image_mean
_lowerCamelCase = image_std
def _snake_case ( self : int ) -> int:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = ViTImageProcessor if is_vision_available() else None
def _snake_case ( self : List[Any] ) -> Dict:
_lowerCamelCase = EfficientFormerImageProcessorTester(self )
@property
def _snake_case ( self : List[str] ) -> Union[str, Any]:
return self.image_proc_tester.prepare_image_processor_dict()
def _snake_case ( self : str ) -> List[str]:
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , 'image_mean' ) )
self.assertTrue(hasattr(snake_case__ , 'image_std' ) )
self.assertTrue(hasattr(snake_case__ , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case__ , 'do_resize' ) )
self.assertTrue(hasattr(snake_case__ , 'size' ) )
def _snake_case ( self : Tuple ) -> str:
pass
def _snake_case ( self : List[Any] ) -> Any:
# Initialize image_processor
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
_lowerCamelCase = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
_lowerCamelCase = image_processor(snake_case__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def _snake_case ( self : Any ) -> Dict:
# Initialize image_processor
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
_lowerCamelCase = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
_lowerCamelCase = image_processor(snake_case__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def _snake_case ( self : Dict ) -> str:
# Initialize image_processor
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
_lowerCamelCase = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
_lowerCamelCase = image_processor(snake_case__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
        ) , )
| 234 | 1
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger()
@dataclass
class __lowercase :
_lowerCamelCase = 42
_lowerCamelCase = field(default_factory=__lowerCamelCase )
_lowerCamelCase = field(default_factory=__lowerCamelCase )
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tensor , UpperCAmelCase_ : Tensor):
UpperCamelCase__ : str = len(list(m.modules())) == 1 or isinstance(UpperCAmelCase_ , nn.Convad) or isinstance(UpperCAmelCase_ , nn.BatchNormad)
if has_not_submodules:
self.traced.append(UpperCAmelCase_)
def __call__( self : List[str] , UpperCAmelCase_ : Tensor):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(UpperCAmelCase_)
[x.remove() for x in self.handles]
return self
@property
def __UpperCamelCase ( self : Union[str, Any]):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda UpperCAmelCase_: len(list(x.state_dict().keys())) > 0 , self.traced))
@dataclass
class __lowercase :
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 1
_lowerCamelCase = field(default_factory=__lowerCamelCase )
_lowerCamelCase = field(default_factory=__lowerCamelCase )
_lowerCamelCase = True
def __call__( self : Optional[Any] , UpperCAmelCase_ : Tensor):
UpperCamelCase__ : int = Tracker(self.dest)(UpperCAmelCase_).parametrized
UpperCamelCase__ : Any = Tracker(self.src)(UpperCAmelCase_).parametrized
UpperCamelCase__ : Any = list(filter(lambda UpperCAmelCase_: type(UpperCAmelCase_) not in self.src_skip , UpperCAmelCase_))
UpperCamelCase__ : Optional[int] = list(filter(lambda UpperCAmelCase_: type(UpperCAmelCase_) not in self.dest_skip , UpperCAmelCase_))
if len(UpperCAmelCase_) != len(UpperCAmelCase_) and self.raise_if_mismatch:
raise Exception(
F'Numbers of operations are different. Source module has {len(UpperCAmelCase_)} operations while'
F' destination module has {len(UpperCAmelCase_)}.')
for dest_m, src_m in zip(UpperCAmelCase_ , UpperCAmelCase_):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
print(F'Transfered from={src_m} to={dest_m}')
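# Minimal usage sketch (hypothetical `timm_model`/`hf_model`; both must trace to the
# same number of learnable leaf ops, otherwise __call__ above raises):
#   ModuleTransfer(src=timm_model, dest=hf_model)(torch.randn(1, 3, 224, 224))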
class __lowercase (nn.Module ):
def __init__( self : Any , UpperCAmelCase_ : nn.Module):
super().__init__()
UpperCamelCase__ : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(('conv1', model.stem))
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block'), F'Unexpected layer name {k}'
UpperCamelCase__ : Optional[Any] = len(UpperCAmelCase_) + 1
feature_blocks.append((F'res{block_index}', v))
UpperCamelCase__ : Any = nn.ModuleDict(UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Tensor):
return get_trunk_forward_outputs(
UpperCAmelCase_ , out_feat_keys=UpperCAmelCase_ , feature_blocks=self._feature_blocks , )
class __lowercase (__lowerCamelCase ):
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : str):
UpperCamelCase__ : int = x.split('-')
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])
def __getitem__( self : Optional[Any] , UpperCAmelCase_ : str):
# default to timm!
if x not in self:
UpperCamelCase__ : List[Any] = self.convert_name_to_timm(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = partial(lambda: (timm.create_model(UpperCAmelCase_ , pretrained=UpperCAmelCase_).eval(), None))
else:
UpperCamelCase__ : List[str] = super().__getitem__(UpperCAmelCase_)
return val
class __lowercase (__lowerCamelCase ):
def __getitem__( self : Tuple , UpperCAmelCase_ : str):
if "seer" in x and "in1k" not in x:
UpperCamelCase__ : Optional[Any] = RegNetModel
else:
UpperCamelCase__ : Optional[Any] = RegNetForImageClassification
return val
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
for from_key, to_key in keys:
UpperCamelCase__ : str = from_state_dict[from_key].clone()
print(f'Copied key={from_key} to={to_key}')
return to_state_dict
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = True , ) -> List[Any]:
print(f'Converting {name}...')
with torch.no_grad():
UpperCamelCase__, UpperCamelCase__ : Any = from_model_func()
UpperCamelCase__ : int = our_model_func(lowerCamelCase_).eval()
UpperCamelCase__ : Union[str, Any] = ModuleTransfer(src=lowerCamelCase_ , dest=lowerCamelCase_ , raise_if_mismatch=lowerCamelCase_)
UpperCamelCase__ : Dict = torch.randn((1, 3, 224, 224))
module_transfer(lowerCamelCase_)
if from_state_dict is not None:
UpperCamelCase__ : Any = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
UpperCamelCase__ : Optional[int] = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
UpperCamelCase__ : Optional[Any] = manually_copy_vissl_head(lowerCamelCase_ , our_model.state_dict() , lowerCamelCase_)
our_model.load_state_dict(lowerCamelCase_)
UpperCamelCase__ : Optional[Any] = our_model(lowerCamelCase_ , output_hidden_states=lowerCamelCase_)
UpperCamelCase__ : Dict = (
our_outputs.logits if isinstance(lowerCamelCase_ , lowerCamelCase_) else our_outputs.last_hidden_state
)
UpperCamelCase__ : Optional[int] = from_model(lowerCamelCase_)
UpperCamelCase__ : Optional[int] = from_output[-1] if type(lowerCamelCase_) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
UpperCamelCase__ : Any = our_outputs.hidden_states[-1]
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , )
UpperCamelCase__ : Tuple = 224 if 'seer' not in name else 384
# we can use the convnext one
UpperCamelCase__ : int = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=lowerCamelCase_)
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , )
print(f'Pushed {name}')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = True) -> int:
UpperCamelCase__ : Any = 'imagenet-1k-id2label.json'
UpperCamelCase__ : int = 1_000
UpperCamelCase__ : Tuple = (1, num_labels)
UpperCamelCase__ : Dict = 'huggingface/label-files'
UpperCamelCase__ : str = num_labels
UpperCamelCase__ : Optional[int] = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset')) , 'r'))
UpperCamelCase__ : Dict = {int(lowerCamelCase_): v for k, v in idalabel.items()}
UpperCamelCase__ : Dict = idalabel
UpperCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ : int = partial(lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_)
UpperCamelCase__ : Tuple = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='x'),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='x'),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='x'),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='x'),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='x'),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type='x'),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type='x'),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type='x'),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type='x'),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type='x'),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type='x'),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type='x'),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010),
}
UpperCamelCase__ : Dict = NameToOurModelFuncMap()
UpperCamelCase__ : Optional[int] = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(lowerCamelCase_ , lowerCamelCase_) -> Tuple[nn.Module, Dict]:
UpperCamelCase__ : Optional[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , model_dir=str(lowerCamelCase_) , map_location='cpu')
UpperCamelCase__ : List[str] = model_func()
# check if we have a head, if yes add it
UpperCamelCase__ : str = files['classy_state_dict']['base_model']['model']
UpperCamelCase__ : str = model_state_dict['trunk']
model.load_state_dict(lowerCamelCase_)
return model.eval(), model_state_dict["heads"]
# pretrained
UpperCamelCase__ : Dict = partial(
lowerCamelCase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf()) , )
UpperCamelCase__ : Any = partial(
lowerCamelCase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf()) , )
UpperCamelCase__ : int = partial(
lowerCamelCase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf()) , )
UpperCamelCase__ : List[str] = partial(
lowerCamelCase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=620.83 , w_m=2.52))) , )
# IN1K finetuned
UpperCamelCase__ : Tuple = partial(
lowerCamelCase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf()) , )
UpperCamelCase__ : Optional[Any] = partial(
lowerCamelCase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf()) , )
UpperCamelCase__ : Any = partial(
lowerCamelCase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf()) , )
UpperCamelCase__ : Tuple = partial(
lowerCamelCase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=620.83 , w_m=2.52))) , )
if model_name:
convert_weight_and_push(
lowerCamelCase_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , lowerCamelCase_ , lowerCamelCase_ , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
lowerCamelCase_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
            'The name of the model you wish to convert; it must be one of the supported regnet* architectures,'
            ' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 596 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
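    # Illustrative invocations (placeholder values; assuming this script is saved as
    # run_on_remote.py):
    #   python run_on_remote.py --instance V100:1 --provider cheapest \
    #       pytorch/text-generation/run_generation.py --model_type=gpt2 --length 20
    #   python run_on_remote.py --host 1.2.3.4 --user ubuntu --key_path ~/.ssh/id_rsa \
    #       pytorch/text-generation/run_generation.py --model_type=gpt2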
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
lowerCAmelCase__ = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
lowerCAmelCase__ = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
lowerCAmelCase__ = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
# stream_logs=True)
| 596 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case_ : Dict = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[str] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
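    # _LazyModule defers the heavy torch/tf submodule imports declared above until an
    # attribute such as CTRLModel is first accessed, keeping `import transformers` fast.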
snake_case_ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 292 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
snake_case_ : Union[str, Any] = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def lowercase_ ( _lowercase : List[str] ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def lowercase_ ( _lowercase : List[str] ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
def lowercase_ ( _lowercase : Tuple ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
UpperCAmelCase : Optional[int] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(_lowercase , id=_lowercase )
def lowercase_ ( _lowercase : str , _lowercase : Dict ):
'''simple docstring'''
if exitstatus == 5:
UpperCAmelCase : List[str] = 0
# Doctest custom flag to ignore output.
snake_case_ : Union[str, Any] = doctest.register_optionflag("""IGNORE_RESULT""")
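# Illustrative doctest using the custom flag (`configure_logging` is hypothetical;
# whatever it prints is not compared against the expected output):
#   >>> configure_logging()  # doctest: +IGNORE_RESULT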
snake_case_ : Optional[int] = doctest.OutputChecker
class snake_case__ ( lowerCAmelCase_ ):
def __lowerCAmelCase ( self : List[Any] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : int ):
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , lowercase , lowercase , lowercase )
snake_case_ : List[str] = CustomOutputChecker
snake_case_ : Optional[Any] = HfDoctestModule
snake_case_ : List[str] = HfDocTestParser
| 292 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Union[str, Any] = "t5"
_UpperCamelCase : Any = ["past_key_values"]
_UpperCamelCase : Dict = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , _lowerCAmelCase=3_2_1_2_8 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=6_4 , _lowerCAmelCase=2_0_4_8 , _lowerCAmelCase=6 , _lowerCAmelCase=None , _lowerCAmelCase=8 , _lowerCAmelCase=3_2 , _lowerCAmelCase=1_2_8 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-6 , _lowerCAmelCase=1.0 , _lowerCAmelCase="relu" , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=0 , _lowerCAmelCase=1 , **_lowerCAmelCase , ):
_lowercase : Tuple = vocab_size
_lowercase : Union[str, Any] = d_model
_lowercase : Tuple = d_kv
_lowercase : str = d_ff
_lowercase : Union[str, Any] = num_layers
_lowercase : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_lowercase : List[Any] = num_heads
_lowercase : List[Any] = relative_attention_num_buckets
_lowercase : Any = relative_attention_max_distance
_lowercase : str = dropout_rate
_lowercase : Any = layer_norm_epsilon
_lowercase : str = initializer_factor
_lowercase : int = feed_forward_proj
_lowercase : Union[str, Any] = use_cache
_lowercase : List[str] = self.feed_forward_proj.split('-' )
_lowercase : str = act_info[-1]
_lowercase : Optional[int] = act_info[0] == 'gated'
if len(_lowerCAmelCase ) > 1 and act_info[0] != "gated" or len(_lowerCAmelCase ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
_lowercase : Dict = 'gelu_new'
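        # e.g. "gated-gelu" -> activation "gelu_new" with gating enabled;
        #      "relu"       -> activation "relu" without gating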
super().__init__(
pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , **_lowerCAmelCase , )
class lowerCAmelCase_ ( __snake_case ):
@property
def __a ( self ):
_lowercase : Union[str, Any] = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
_lowercase : Optional[int] = 'past_encoder_sequence + sequence'
_lowercase : Tuple = {0: 'batch'}
_lowercase : Tuple = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_lowercase : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
_lowercase : List[Any] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction='inputs' )
return common_inputs
@property
def __a ( self ):
return 1_3
| 66 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[Any]:
_lowercase : int = 384
if "tiny" in model_name:
_lowercase : Tuple = [3, 3, 9, 3]
_lowercase : List[str] = [96, 192, 384, 768]
if "small" in model_name:
_lowercase : List[str] = [3, 3, 27, 3]
_lowercase : Union[str, Any] = [96, 192, 384, 768]
if "base" in model_name:
_lowercase : List[Any] = [3, 3, 27, 3]
_lowercase : Dict = [128, 256, 512, 1_024]
_lowercase : Optional[int] = 512
if "large" in model_name:
_lowercase : List[str] = [3, 3, 27, 3]
_lowercase : List[Any] = [192, 384, 768, 1_536]
_lowercase : Tuple = 768
if "xlarge" in model_name:
_lowercase : str = [3, 3, 27, 3]
_lowercase : List[str] = [256, 512, 1_024, 2_048]
_lowercase : Tuple = 1_024
# set label information
_lowercase : Dict = 150
_lowercase : Union[str, Any] = 'huggingface/label-files'
_lowercase : str = 'ade20k-id2label.json'
_lowercase : List[Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
_lowercase : Dict = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_lowercase : Tuple = {v: k for k, v in idalabel.items()}
_lowercase : List[str] = ConvNextConfig(
depths=SCREAMING_SNAKE_CASE , hidden_sizes=SCREAMING_SNAKE_CASE , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
_lowercase : Union[str, Any] = UperNetConfig(
backbone_config=SCREAMING_SNAKE_CASE , auxiliary_in_channels=SCREAMING_SNAKE_CASE , num_labels=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid=SCREAMING_SNAKE_CASE , )
return config
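# e.g. "upernet-convnext-tiny" -> ConvNeXt backbone with depths [3, 3, 9, 3], hidden
# sizes [96, 192, 384, 768], an auxiliary head on 384 channels and 150 ADE20k labels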
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
_lowercase : Any = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
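# Example of one produced mapping (i = 0, j = 0):
#   ("backbone.stages.0.0.gamma", "backbone.encoder.stages.0.layers.0.layer_scale_parameter")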
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
_lowercase : Any = dct.pop(SCREAMING_SNAKE_CASE )
_lowercase : Any = val
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
_lowercase : List[Any] = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
_lowercase : Optional[int] = model_name_to_url[model_name]
_lowercase : str = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )['state_dict']
_lowercase : Optional[int] = get_upernet_config(SCREAMING_SNAKE_CASE )
_lowercase : Tuple = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowercase : List[Any] = state_dict.pop(SCREAMING_SNAKE_CASE )
if "bn" in key:
_lowercase : Any = key.replace('bn' , 'batch_norm' )
_lowercase : Any = val
# rename keys
_lowercase : int = create_rename_keys(SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
# verify on image
_lowercase : Union[str, Any] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
_lowercase : Any = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ).convert('RGB' )
_lowercase : Tuple = SegformerImageProcessor()
_lowercase : Tuple = processor(SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
with torch.no_grad():
_lowercase : Dict = model(SCREAMING_SNAKE_CASE )
if model_name == "upernet-convnext-tiny":
_lowercase : Dict = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
_lowercase : Union[str, Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
_lowercase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
_lowercase : Optional[int] = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
_lowercase : str = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(F"""openmmlab/{model_name}""" )
processor.push_to_hub(F"""openmmlab/{model_name}""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 66 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : Dict = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
from math import sqrt
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : int ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' must been an int and positive"
UpperCamelCase_ : Union[str, Any] = True
# 0 and 1 are none primes.
if number <= 1:
UpperCamelCase_ : Tuple = False
for divisor in range(2 , int(round(sqrt(_SCREAMING_SNAKE_CASE ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
UpperCamelCase_ : Union[str, Any] = False
break
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'status' must been from type bool"
return status
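# Worked example: for number = 13, divisors 2..4 (= int(round(sqrt(13)))) are tried and
# none divides it, so the flag stays True; for 12, divisor 2 flips it to False.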
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Tuple ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
UpperCamelCase_ : Dict = list(range(2 , n + 1 ) )
    UpperCamelCase_ : Union[str, Any] = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
for j in range(i + 1 , len(_SCREAMING_SNAKE_CASE ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
UpperCamelCase_ : Optional[int] = 0
# filters actual prime numbers.
UpperCamelCase_ : Union[str, Any] = [x for x in begin_list if x != 0]
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
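# e.g. n = 10: begin_list starts as [2..10]; multiples of earlier entries are zeroed,
# leaving the primes [2, 3, 5, 7].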
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : int ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2"
UpperCamelCase_ : List[Any] = []
    # iterates over all numbers from 2 up to N (inclusive)
    # and appends each prime to the list 'ans'
for number in range(2 , n + 1 ):
if is_prime(_SCREAMING_SNAKE_CASE ):
ans.append(_SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and number >= 0, "'number' must been an int and >= 0"
    UpperCamelCase_ : Union[str, Any] = [] # this list will be returned by the function.
# potential prime number factors.
UpperCamelCase_ : Optional[int] = 2
UpperCamelCase_ : Dict = number
if number == 0 or number == 1:
ans.append(_SCREAMING_SNAKE_CASE )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(_SCREAMING_SNAKE_CASE ):
while quotient != 1:
if is_prime(_SCREAMING_SNAKE_CASE ) and (quotient % factor == 0):
ans.append(_SCREAMING_SNAKE_CASE )
quotient /= factor
else:
factor += 1
else:
ans.append(_SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
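# Intended behavior (each prime factor appended as often as it divides), e.g.
# number = 60 -> [2, 2, 3, 5].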
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : int ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' bust been an int and >= 0"
UpperCamelCase_ : Tuple = 0
# prime factorization of 'number'
UpperCamelCase_ : int = prime_factorization(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ : Any = max(_SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type int"
return ans
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' bust been an int and >= 0"
UpperCamelCase_ : Any = 0
# prime factorization of 'number'
UpperCamelCase_ : Dict = prime_factorization(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ : Union[str, Any] = min(_SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type int"
return ans
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : str ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , _SCREAMING_SNAKE_CASE ), "compare must been from type bool"
return number % 2 == 0
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : int ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , _SCREAMING_SNAKE_CASE ), "compare must been from type bool"
return number % 2 != 0
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[int] ):
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (number > 2) and is_even(_SCREAMING_SNAKE_CASE )
), "'number' must been an int, even and > 2"
    UpperCamelCase_ : Tuple = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
UpperCamelCase_ : int = get_prime_numbers(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ : str = len(_SCREAMING_SNAKE_CASE )
# run variable for while-loops.
UpperCamelCase_ : Optional[int] = 0
UpperCamelCase_ : Union[str, Any] = None
    # exit variable, used to break out of the loops
UpperCamelCase_ : Any = True
while i < len_pn and loop:
UpperCamelCase_ : Optional[Any] = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
UpperCamelCase_ : Union[str, Any] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (len(_SCREAMING_SNAKE_CASE ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
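# e.g. number = 28: primes up to 28 are scanned pairwise and 5 + 23 = 28 is the first
# hit, so the intended result is [5, 23].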
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] ):
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
UpperCamelCase_ : List[Any] = 0
while numbera != 0:
UpperCamelCase_ : List[str] = numbera % numbera
UpperCamelCase_ : Optional[Any] = numbera
UpperCamelCase_ : Optional[Any] = rest
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any ):
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
UpperCamelCase_ : int = 1 # actual answer that will be returned.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
UpperCamelCase_ : Optional[Any] = prime_factorization(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ : Optional[Any] = prime_factorization(_SCREAMING_SNAKE_CASE )
elif numbera == 1 or numbera == 1:
UpperCamelCase_ : int = []
UpperCamelCase_ : Dict = []
UpperCamelCase_ : List[Any] = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ : int = 0
UpperCamelCase_ : int = 0
UpperCamelCase_ : Union[str, Any] = [] # numbers already captured in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
UpperCamelCase_ : List[str] = prime_fac_a.count(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ : List[Any] = prime_fac_a.count(_SCREAMING_SNAKE_CASE )
for _ in range(max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
ans *= n
else:
UpperCamelCase_ : Tuple = prime_fac_a.count(_SCREAMING_SNAKE_CASE )
for _ in range(_SCREAMING_SNAKE_CASE ):
ans *= n
done.append(_SCREAMING_SNAKE_CASE )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
UpperCamelCase_ : List[Any] = prime_fac_a.count(_SCREAMING_SNAKE_CASE )
for _ in range(_SCREAMING_SNAKE_CASE ):
ans *= n
done.append(_SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
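# The two functions above are Euclid's gcd and the lcm ("kgV"), built from the
# merged prime factorizations. Sketch with de-obfuscated names:
#   gcd(24, 36)  # -> 12
#   kgV(24, 36)  # -> 72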
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : List[Any] ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must be an int and >= 0"
UpperCamelCase_ : str = 0
UpperCamelCase_ : Tuple = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(_SCREAMING_SNAKE_CASE ):
ans += 1
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and is_prime(
_SCREAMING_SNAKE_CASE ), "'ans' must been a prime number and from type int"
return ans
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
assert (
is_prime(_SCREAMING_SNAKE_CASE ) and is_prime(_SCREAMING_SNAKE_CASE ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
UpperCamelCase_ : Optional[int] = p_number_a + 1 # jump to the next number
UpperCamelCase_ : List[Any] = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(_SCREAMING_SNAKE_CASE ):
number += 1
while number < p_number_a:
ans.append(_SCREAMING_SNAKE_CASE )
number += 1
# fetch the next prime number.
while not is_prime(_SCREAMING_SNAKE_CASE ):
number += 1
# precondition
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and ans[0] != p_number_a
and ans[len(_SCREAMING_SNAKE_CASE ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : List[str] ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 1), "'n' must be an int and >= 1"
UpperCamelCase_ : Any = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(_SCREAMING_SNAKE_CASE )
# precondition
assert ans[0] == 1 and ans[len(_SCREAMING_SNAKE_CASE ) - 1] == n, "Error in function getDivisors(...)"
return ans
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Dict ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
number > 1
), "'number' must been an int and >= 1"
UpperCamelCase_ : Dict = get_divisors(_SCREAMING_SNAKE_CASE )
# precondition
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (divisors[0] == 1)
and (divisors[len(_SCREAMING_SNAKE_CASE ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
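# Perfect-number sketch: a number equals the sum of its proper divisors, e.g.
#   is_perfect_number(6)   # -> True  (1 + 2 + 3)
#   is_perfect_number(28)  # -> True  (1 + 2 + 4 + 7 + 14)
#   is_perfect_number(20)  # -> False (1 + 2 + 4 + 5 + 10 = 22)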
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ):
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
UpperCamelCase_ : List[Any] = gcd(abs(_SCREAMING_SNAKE_CASE ) , abs(_SCREAMING_SNAKE_CASE ) )
# precondition
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
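# Fraction simplification sketch (de-obfuscated name assumed):
#   simplify_fraction(10, 20)  # -> (1, 2)
#   simplify_fraction(-5, 15)  # -> (-1, 3), the gcd is taken on absolute values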
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : int ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must be an int and >= 0"
UpperCamelCase_ : Dict = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Tuple ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must be an int and >= 0"
UpperCamelCase_ : Tuple = 0
UpperCamelCase_ : Optional[int] = 1
UpperCamelCase_ : List[str] = 1 # this will be returned
for _ in range(n - 1 ):
UpperCamelCase_ : List[Any] = ans
ans += fiba
UpperCamelCase_ : Any = tmp
return ans
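# Iterative Fibonacci sketch: with the de-obfuscated initialization
# (tmp = 0, fib1 = 1, ans = 1), the loop yields 1, 2, 3, 5, 8, ...
# for n = 1, 2, 3, 4, 5.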
| 138 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = """poolformer"""
def __init__( self : List[Any] , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : Any=1_6 , UpperCamelCase__ : Dict=1_6 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Optional[Any]=4.0 , UpperCamelCase__ : Optional[Any]=[2, 2, 6, 2] , UpperCamelCase__ : str=[6_4, 1_2_8, 3_2_0, 5_1_2] , UpperCamelCase__ : Dict=[7, 3, 3, 3] , UpperCamelCase__ : int=[4, 2, 2, 2] , UpperCamelCase__ : List[str]=[2, 1, 1, 1] , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : int=1e-5 , UpperCamelCase__ : Any=0.02 , **UpperCamelCase__ : Optional[int] , )-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = num_channels
__lowerCAmelCase: int = patch_size
__lowerCAmelCase: Any = stride
__lowerCAmelCase: int = padding
__lowerCAmelCase: Any = pool_size
__lowerCAmelCase: Optional[int] = hidden_sizes
__lowerCAmelCase: List[str] = mlp_ratio
__lowerCAmelCase: Union[str, Any] = depths
__lowerCAmelCase: Union[str, Any] = patch_sizes
__lowerCAmelCase: Optional[int] = strides
__lowerCAmelCase: Optional[int] = num_encoder_blocks
__lowerCAmelCase: Any = drop_path_rate
__lowerCAmelCase: int = hidden_act
__lowerCAmelCase: List[Any] = use_layer_scale
__lowerCAmelCase: Optional[Any] = layer_scale_init_value
__lowerCAmelCase: Optional[Any] = initializer_range
super().__init__(**UpperCamelCase__)
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : int = version.parse("""1.11""" )
@property
def lowercase_ ( self : Union[str, Any])-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def lowercase_ ( self : str)-> float:
'''simple docstring'''
return 2e-3
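# Minimal usage sketch (assumption: the obfuscated classes above correspond to
# PoolFormerConfig and its ONNX export config in transformers):
#   config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2])
#   onnx_config = PoolFormerOnnxConfig(config)
#   onnx_config.inputs  # OrderedDict mapping "pixel_values" to its dynamic axes
#   onnx_config.atol_for_validation  # 2e-3, as returned above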
| 346 |
"""simple docstring"""
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> float:
if principal <= 0:
raise Exception("Principal borrowed must be > 0" )
if rate_per_annum < 0:
raise Exception("Rate of interest must be >= 0" )
if years_to_repay <= 0 or not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise Exception("Years to repay must be an integer > 0" )
# Yearly rate is divided by 12 to get monthly rate
__lowerCAmelCase: str = rate_per_annum / 1_2
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
__lowerCAmelCase: Optional[Any] = years_to_repay * 1_2
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
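# Worked example of the EMI formula above: principal=25_000, rate_per_annum=0.12,
# years_to_repay=3 gives rate_per_month=0.01 and number_of_payments=36, so
#   25_000 * 0.01 * 1.01**36 / (1.01**36 - 1) ≈ 830.36 per month.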
| 346 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( _UpperCamelCase : int ) -> bool:
if not isinstance(_UpperCamelCase, _UpperCamelCase ):
A_ = F'''Input value of [number={number}] must be an integer'''
raise TypeError(_UpperCamelCase )
if number < 0:
return False
A_ = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
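# Behavior sketch: this checks, digit by digit from the right, whether
# number**2 ends in number itself (an automorphic number), e.g.
#   5 -> 25, 6 -> 36, 76 -> 5776   # all True
#   7 -> 49                        # False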
| 174 | '''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _UpperCAmelCase ( _UpperCamelCase : str ) -> str:
return "".join(sorted(_UpperCamelCase ) )
def _UpperCAmelCase ( _UpperCamelCase : str ) -> list[str]:
return word_by_signature[signature(_UpperCamelCase )]
__snake_case : str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
__snake_case : Tuple = sorted({word.strip().lower() for word in data.splitlines()})
__snake_case : Union[str, Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__snake_case : str = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
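# How the lookup works, in brief: two words are anagrams iff their sorted
# letters match, so signature("listen") == signature("silent") == "eilnst",
# and anagram("listen") returns every word sharing that signature.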
| 174 | 1 |
_lowerCAmelCase = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 10 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
_lowerCamelCase = 'bert-base-cased'
_lowerCamelCase = 'fp16'
_lowerCamelCase = 'bf16'
_lowerCamelCase = [FPaa, BFaa]
@require_fsdp
@require_cuda
class UpperCamelCase_ ( UpperCamelCase__ ):
def _snake_case ( self :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ = dict(
ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )
def _snake_case ( self :List[Any] ) -> Tuple:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = f'''{i + 1}'''
SCREAMING_SNAKE_CASE__ = strategy
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def _snake_case ( self :int ) -> List[str]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = prefetch_policy
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def _snake_case ( self :List[str] ) -> List[str]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = state_dict_type
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def _snake_case ( self :str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AutoModel.from_pretrained(__A )
for policy in FSDP_AUTO_WRAP_POLICY:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = policy
if policy == "TRANSFORMER_BASED_WRAP":
SCREAMING_SNAKE_CASE__ = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
SCREAMING_SNAKE_CASE__ = """2000"""
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__A )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = """TRANSFORMER_BASED_WRAP"""
SCREAMING_SNAKE_CASE__ = """T5Layer"""
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
with self.assertRaises(__A ) as cm:
fsdp_plugin.set_auto_wrap_policy(__A )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = """SIZE_BASED_WRAP"""
SCREAMING_SNAKE_CASE__ = """0"""
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__A )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def _snake_case ( self :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = mp_dtype
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = Accelerator()
if mp_dtype == "fp16":
SCREAMING_SNAKE_CASE__ = torch.floataa
elif mp_dtype == "bf16":
SCREAMING_SNAKE_CASE__ = torch.bfloataa
SCREAMING_SNAKE_CASE__ = MixedPrecision(param_dtype=__A , reduce_dtype=__A , buffer_dtype=__A )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __A )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , __A ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(__A )
def _snake_case ( self :str ) -> str:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = str(__A ).lower()
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__A ) )
@require_fsdp
@require_multi_gpu
@slow
class UpperCamelCase_ ( UpperCamelCase__ ):
def _snake_case ( self :Any ) -> Any:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ = 0.8_2
SCREAMING_SNAKE_CASE__ = [
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
SCREAMING_SNAKE_CASE__ = {
"""multi_gpu_fp16""": 3200,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
SCREAMING_SNAKE_CASE__ = 160
SCREAMING_SNAKE_CASE__ = 160
SCREAMING_SNAKE_CASE__ = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
def _snake_case ( self :Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_performance.py""" )
SCREAMING_SNAKE_CASE__ = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
SCREAMING_SNAKE_CASE__ = cmd.copy()
for i, strategy in enumerate(__A ):
if strategy.lower() in config:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
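# For reference, one fully assembled command from the loop above looks like
# (values illustrative, taken from the flags appended above):
#   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 \
#     --use_fsdp --fsdp_sharding_strategy=1 --mixed_precision=fp16 \
#     --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#     --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#     test_performance.py --output_dir=<tmpdir> --performance_lower_bound=0.82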
def _snake_case ( self :Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
SCREAMING_SNAKE_CASE__ = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = cmd.copy()
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
SCREAMING_SNAKE_CASE__ = len(__A )
for state_dict_type in FSDP_STATE_DICT_TYPE:
SCREAMING_SNAKE_CASE__ = cmd_config[:state_dict_config_index]
cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
SCREAMING_SNAKE_CASE__ = cmd_config[:-1]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
f'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
def _snake_case ( self :Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
SCREAMING_SNAKE_CASE__ = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
SCREAMING_SNAKE_CASE__ = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
for i, strategy in enumerate(__A ):
if strategy.lower() in spec:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
f'''--n_train={self.n_train}''',
f'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
| 6 | 0 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__SCREAMING_SNAKE_CASE : Any = True
except ImportError:
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case_ ( lowercase__ : Namespace ):
'''simple docstring'''
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class __lowerCamelCase ( lowerCamelCase_ ):
"""simple docstring"""
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase_ : ArgumentParser ):
_lowerCAmelCase =parser.add_parser("""add-new-model""" )
add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" )
add_new_model_parser.add_argument("""--testing_file""" , type=lowerCamelCase_ , help="""Configuration file on which to run.""" )
add_new_model_parser.add_argument(
"""--path""" , type=lowerCamelCase_ , help="""Path to cookiecutter. Should only be used for testing purposes.""" )
add_new_model_parser.set_defaults(func=lowerCamelCase_ )
def __init__( self : Optional[Any] , lowerCamelCase_ : bool , lowerCamelCase_ : str , lowerCamelCase_ : Tuple=None , *lowerCamelCase_ : Union[str, Any] ):
_lowerCAmelCase =testing
_lowerCAmelCase =testing_file
_lowerCAmelCase =path
def lowerCAmelCase__ ( self : List[Any] ):
warnings.warn(
"""The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """
"""It is not actively maintained anymore, so might give a result that won't pass all tests and quality """
"""checks, you should use `transformers-cli add-new-model-like` instead.""" )
if not _has_cookiecutter:
raise ImportError(
"""Model creation dependencies are required to use the `add_new_model` command. Install them by running """
"""the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
_lowerCAmelCase =[directory for directory in os.listdir() if """cookiecutter-template-""" == directory[:22]]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
"""Several directories starting with `cookiecutter-template-` in current working directory. """
"""Please clean your directory by removing all folders starting with `cookiecutter-template-` or """
"""change your working directory.""" )
_lowerCAmelCase =(
Path(lowerCamelCase_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
_lowerCAmelCase =path_to_transformer_root / """templates""" / """adding_a_new_model"""
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowerCamelCase_ ) )
else:
with open(self._testing_file , """r""" ) as configuration_file:
_lowerCAmelCase =json.load(lowerCamelCase_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowerCamelCase_ , extra_context=lowerCamelCase_ , )
_lowerCAmelCase =[directory for directory in os.listdir() if """cookiecutter-template-""" in directory[:22]][0]
# Retrieve configuration
with open(directory + """/configuration.json""" , """r""" ) as configuration_file:
_lowerCAmelCase =json.load(lowerCamelCase_ )
_lowerCAmelCase =configuration["""lowercase_modelname"""]
_lowerCAmelCase =configuration["""generate_tensorflow_pytorch_and_flax"""]
os.remove(F"{directory}/configuration.json" )
_lowerCAmelCase ="""PyTorch""" in generate_tensorflow_pytorch_and_flax
_lowerCAmelCase ="""TensorFlow""" in generate_tensorflow_pytorch_and_flax
_lowerCAmelCase ="""Flax""" in generate_tensorflow_pytorch_and_flax
_lowerCAmelCase =F"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
os.makedirs(F"{path_to_transformer_root}/tests/models/{lowercase_model_name}" , exist_ok=lowerCamelCase_ )
# Tests require submodules as they have parent imports
with open(F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , """w""" ):
pass
shutil.move(
F"{directory}/__init__.py" , F"{model_dir}/__init__.py" , )
shutil.move(
F"{directory}/configuration_{lowercase_model_name}.py" , F"{model_dir}/configuration_{lowercase_model_name}.py" , )
def remove_copy_lines(lowerCamelCase_ : int ):
with open(lowerCamelCase_ , """r""" ) as f:
_lowerCAmelCase =f.readlines()
with open(lowerCamelCase_ , """w""" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowerCamelCase_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F"{directory}/modeling_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/modeling_{lowercase_model_name}.py" , F"{model_dir}/modeling_{lowercase_model_name}.py" , )
shutil.move(
F"{directory}/test_modeling_{lowercase_model_name}.py" , F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , )
else:
os.remove(F"{directory}/modeling_{lowercase_model_name}.py" )
os.remove(F"{directory}/test_modeling_{lowercase_model_name}.py" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F"{directory}/modeling_tf_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/modeling_tf_{lowercase_model_name}.py" , F"{model_dir}/modeling_tf_{lowercase_model_name}.py" , )
shutil.move(
F"{directory}/test_modeling_tf_{lowercase_model_name}.py" , F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , )
else:
os.remove(F"{directory}/modeling_tf_{lowercase_model_name}.py" )
os.remove(F"{directory}/test_modeling_tf_{lowercase_model_name}.py" )
if output_flax:
if not self._testing:
remove_copy_lines(F"{directory}/modeling_flax_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/modeling_flax_{lowercase_model_name}.py" , F"{model_dir}/modeling_flax_{lowercase_model_name}.py" , )
shutil.move(
F"{directory}/test_modeling_flax_{lowercase_model_name}.py" , F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , )
else:
os.remove(F"{directory}/modeling_flax_{lowercase_model_name}.py" )
os.remove(F"{directory}/test_modeling_flax_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/{lowercase_model_name}.md" , F"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , )
shutil.move(
F"{directory}/tokenization_{lowercase_model_name}.py" , F"{model_dir}/tokenization_{lowercase_model_name}.py" , )
shutil.move(
F"{directory}/tokenization_fast_{lowercase_model_name}.py" , F"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : List[str] ):
# Create temp file
_lowerCAmelCase , _lowerCAmelCase =mkstemp()
_lowerCAmelCase =False
with fdopen(lowerCamelCase_ , """w""" ) as new_file:
with open(lowerCamelCase_ ) as old_file:
for line in old_file:
new_file.write(lowerCamelCase_ )
if line_to_copy_below in line:
_lowerCAmelCase =True
for line_to_copy in lines_to_copy:
new_file.write(lowerCamelCase_ )
if not line_found:
raise ValueError(F"Line {line_to_copy_below} was not found in file." )
# Copy the file permissions from the old file to the new file
copymode(lowerCamelCase_ , lowerCamelCase_ )
# Remove original file
remove(lowerCamelCase_ )
# Move new file
move(lowerCamelCase_ , lowerCamelCase_ )
def skip_units(lowerCamelCase_ : Dict ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowerCamelCase_ : Dict ):
with open(lowerCamelCase_ ) as datafile:
_lowerCAmelCase =[]
_lowerCAmelCase =False
_lowerCAmelCase =False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
_lowerCAmelCase =line.split("""\"""" )[1]
_lowerCAmelCase =skip_units(lowerCamelCase_ )
elif "# Below: " in line and "##" not in line:
_lowerCAmelCase =line.split("""\"""" )[1]
_lowerCAmelCase =skip_units(lowerCamelCase_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowerCAmelCase =[]
elif "# Replace with" in line and "##" not in line:
_lowerCAmelCase =[]
elif "##" not in line:
lines_to_copy.append(lowerCamelCase_ )
remove(lowerCamelCase_ )
replace_in_files(F"{directory}/to_replace_{lowercase_model_name}.py" )
os.rmdir(lowerCamelCase_ )
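# The marker grammar parsed by replace_in_files above looks like this
# (content illustrative):
#   # To replace in: "src/transformers/models/auto/configuration_auto.py"
#   # Below: "# Add configs here"
#   ("new-model", "NewModelConfig"),
#   # End.
# Lines without "##" between the markers are buffered and copied below the
# "# Below:" anchor in the target file.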
| 149 |
from maths.prime_factors import prime_factors
def snake_case_ ( lowercase__ : int ):
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ):
_lowerCAmelCase =f"Input value of [number={number}] must be an integer"
raise TypeError(lowercase__ )
if number < 1:
raise ValueError("""Input must be a positive integer""" )
return -1 if len(prime_factors(lowercase__ ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
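# Behavior sketch: this is the Liouville function λ(n) = (-1)**Ω(n), where Ω(n)
# counts prime factors with multiplicity, e.g.
#   n=1 -> 1, n=2 -> -1, n=4 -> 1, n=12 -> -1  (12 = 2 * 2 * 3)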
| 149 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Tuple = ['image_processor', 'tokenizer']
snake_case__ :List[Any] = 'ChineseCLIPImageProcessor'
snake_case__ :Optional[int] = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self : Dict , __magic_name__ : List[str]=None , __magic_name__ : List[Any]=None , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __magic_name__ , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = self.image_processor
def __call__( self : List[Any] , __magic_name__ : Tuple=None , __magic_name__ : Any=None , __magic_name__ : str=None , **__magic_name__ : List[str] ):
"""simple docstring"""
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowerCAmelCase__ = self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if images is not None:
lowerCAmelCase__ = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is not None and images is not None:
lowerCAmelCase__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple , *__magic_name__ : Union[str, Any] , **__magic_name__ : List[str] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any , *__magic_name__ : str , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = self.tokenizer.model_input_names
lowerCAmelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __magic_name__ , )
return self.image_processor_class
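# Usage sketch (class and argument names assumed from the transformers
# ChineseCLIP processor API):
#   processor = ChineseCLIPProcessor(image_processor=image_processor, tokenizer=tokenizer)
#   inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")
#   # 'inputs' then carries the tokenizer fields plus 'pixel_values'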
| 48 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def __magic_name__ ( __a : int , __a : List[str]=False ):
'''simple docstring'''
UpperCamelCase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCamelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __magic_name__ ( __a : Optional[int] , __a : Tuple , __a : List[str]=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
UpperCamelCase__ = """"""
else:
UpperCamelCase__ = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" )
UpperCamelCase__ = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ = in_proj_weight[
: config.hidden_size, :
]
UpperCamelCase__ = in_proj_bias[: config.hidden_size]
UpperCamelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ = in_proj_bias[-config.hidden_size :]
def __magic_name__ ( __a : Any ):
'''simple docstring'''
UpperCamelCase__ = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__a , __a )
def __magic_name__ ( __a : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(__a , __a )
def __magic_name__ ( __a : int , __a : Any , __a : List[str] ):
'''simple docstring'''
UpperCamelCase__ = dct.pop(__a )
UpperCamelCase__ = val
def __magic_name__ ( __a : int , __a : Any ):
'''simple docstring'''
UpperCamelCase__ = ViTMSNConfig()
UpperCamelCase__ = 1_000
UpperCamelCase__ = """datasets/huggingface/label-files"""
UpperCamelCase__ = """imagenet-1k-id2label.json"""
UpperCamelCase__ = json.load(open(hf_hub_download(__a , __a ) , """r""" ) )
UpperCamelCase__ = {int(__a ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
UpperCamelCase__ = 384
UpperCamelCase__ = 1_536
UpperCamelCase__ = 6
elif "l16" in checkpoint_url:
UpperCamelCase__ = 1_024
UpperCamelCase__ = 4_096
UpperCamelCase__ = 24
UpperCamelCase__ = 16
UpperCamelCase__ = 0.1
elif "b4" in checkpoint_url:
UpperCamelCase__ = 4
elif "l7" in checkpoint_url:
UpperCamelCase__ = 7
UpperCamelCase__ = 1_024
UpperCamelCase__ = 4_096
UpperCamelCase__ = 24
UpperCamelCase__ = 16
UpperCamelCase__ = 0.1
UpperCamelCase__ = ViTMSNModel(__a )
UpperCamelCase__ = torch.hub.load_state_dict_from_url(__a , map_location="""cpu""" )["""target_encoder"""]
UpperCamelCase__ = ViTImageProcessor(size=config.image_size )
remove_projection_head(__a )
UpperCamelCase__ = create_rename_keys(__a , base_model=__a )
for src, dest in rename_keys:
rename_key(__a , __a , __a )
read_in_q_k_v(__a , __a , base_model=__a )
model.load_state_dict(__a )
model.eval()
UpperCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCamelCase__ = Image.open(requests.get(__a , stream=__a ).raw )
UpperCamelCase__ = ViTImageProcessor(
size=config.image_size , image_mean=__a , image_std=__a )
UpperCamelCase__ = image_processor(images=__a , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
UpperCamelCase__ = model(**__a )
UpperCamelCase__ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
UpperCamelCase__ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
UpperCamelCase__ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
UpperCamelCase__ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
UpperCamelCase__ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
UpperCamelCase__ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __a , atol=1E-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__a )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__a )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCamelCase_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
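# Example invocation (script name and output path illustrative):
#   python convert_vit_msn_to_pytorch.py \
#     --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#     --pytorch_dump_folder_path ./vit-msn-small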
| 513 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class snake_case__ ( UpperCamelCase_ ):
_lowerCAmelCase ='blip_text_model'
def __init__( self : str , _lowerCamelCase : Optional[Any]=3_0_5_2_4 , _lowerCamelCase : Tuple=7_6_8 , _lowerCamelCase : int=7_6_8 , _lowerCamelCase : Optional[int]=3_0_7_2 , _lowerCamelCase : Optional[int]=7_6_8 , _lowerCamelCase : str=1_2 , _lowerCamelCase : Optional[int]=8 , _lowerCamelCase : Tuple=5_1_2 , _lowerCamelCase : Optional[Any]="gelu" , _lowerCamelCase : Dict=1E-12 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Optional[Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : Optional[int]=3_0_5_2_2 , _lowerCamelCase : List[Any]=2 , _lowerCamelCase : Dict=0 , _lowerCamelCase : Union[str, Any]=1_0_2 , _lowerCamelCase : Dict=True , _lowerCamelCase : Union[str, Any]=True , **_lowerCamelCase : Tuple , ):
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , sep_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
snake_case__ : str = vocab_size
snake_case__ : List[Any] = hidden_size
snake_case__ : int = encoder_hidden_size
snake_case__ : Optional[Any] = intermediate_size
snake_case__ : Optional[int] = projection_dim
snake_case__ : str = hidden_dropout_prob
snake_case__ : Dict = num_hidden_layers
snake_case__ : Dict = num_attention_heads
snake_case__ : Tuple = max_position_embeddings
snake_case__ : Optional[int] = layer_norm_eps
snake_case__ : Optional[Any] = hidden_act
snake_case__ : Optional[int] = initializer_range
snake_case__ : Optional[Any] = attention_probs_dropout_prob
snake_case__ : List[Any] = is_decoder
snake_case__ : Tuple = use_cache
@classmethod
def UpperCAmelCase__ ( cls : Dict , _lowerCamelCase : Union[str, os.PathLike] , **_lowerCamelCase : str ):
cls._set_token_in_kwargs(UpperCAmelCase__ )
snake_case__ : int = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
snake_case__ : Optional[Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
class snake_case__ ( UpperCamelCase_ ):
_lowerCAmelCase ='blip_vision_model'
def __init__( self : str , _lowerCamelCase : Optional[int]=7_6_8 , _lowerCamelCase : Optional[Any]=3_0_7_2 , _lowerCamelCase : List[Any]=5_1_2 , _lowerCamelCase : Any=1_2 , _lowerCamelCase : Optional[int]=1_2 , _lowerCamelCase : Any=3_8_4 , _lowerCamelCase : Optional[Any]=1_6 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : int=1E-5 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : Any=1E-10 , **_lowerCamelCase : Dict , ):
super().__init__(**UpperCAmelCase__ )
snake_case__ : List[str] = hidden_size
snake_case__ : Union[str, Any] = intermediate_size
snake_case__ : Tuple = projection_dim
snake_case__ : Dict = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : Dict = patch_size
snake_case__ : Optional[Any] = image_size
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : Dict = attention_dropout
snake_case__ : Union[str, Any] = layer_norm_eps
snake_case__ : Optional[int] = hidden_act
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , _lowerCamelCase : Union[str, os.PathLike] , **_lowerCamelCase : Optional[Any] ):
cls._set_token_in_kwargs(UpperCAmelCase__ )
snake_case__ : Union[str, Any] = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
snake_case__ : Tuple = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
class snake_case__ ( UpperCamelCase_ ):
_lowerCAmelCase ='blip'
_lowerCAmelCase =True
def __init__( self : Optional[Any] , _lowerCamelCase : Dict=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[int]=5_1_2 , _lowerCamelCase : Any=2.6592 , _lowerCamelCase : Any=2_5_6 , **_lowerCamelCase : Any , ):
super().__init__(**UpperCAmelCase__ )
if text_config is None:
snake_case__ : Union[str, Any] = {}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
snake_case__ : Tuple = {}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
snake_case__ : List[Any] = BlipTextConfig(**UpperCAmelCase__ )
snake_case__ : List[str] = BlipVisionConfig(**UpperCAmelCase__ )
snake_case__ : Dict = self.vision_config.hidden_size
snake_case__ : Any = projection_dim
snake_case__ : Optional[int] = logit_scale_init_value
snake_case__ : List[Any] = 1.0
snake_case__ : int = 0.02
snake_case__ : Optional[Any] = image_text_hidden_size
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , _lowerCamelCase : BlipTextConfig , _lowerCamelCase : BlipVisionConfig , **_lowerCamelCase : int ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase__ )
def UpperCAmelCase__ ( self : int ):
snake_case__ : Optional[int] = copy.deepcopy(self.__dict__ )
snake_case__ : Tuple = self.text_config.to_dict()
snake_case__ : Optional[int] = self.vision_config.to_dict()
snake_case__ : List[str] = self.__class__.model_type
return output
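# Usage sketch (assuming the obfuscated classes above are BlipTextConfig,
# BlipVisionConfig and BlipConfig):
#   config = BlipConfig.from_text_vision_configs(BlipTextConfig(), BlipVisionConfig())
#   config.to_dict()  # re-nests text_config and vision_config as plain dicts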
| 718 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCamelCase : Tuple = logging.getLogger(__name__)
lowerCamelCase : Union[str, Any] = 5_0 # max width of layer names
lowerCamelCase : Any = 7_0 # max width of quantizer names
def lowercase__( A ):
snake_case__ : Optional[int] = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=A , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=A , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=A , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=A , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=A , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=A , type=A , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=A , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def lowercase__( A ):
if args.calibrator == "max":
snake_case__ : Any = 'max'
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator' )
snake_case__ : List[Any] = 'histogram'
elif args.calibrator == "mse":
snake_case__ : str = 'histogram'
else:
raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
snake_case__ : Union[str, Any] = QuantDescriptor(num_bits=args.aprec , calib_method=A )
snake_case__ : List[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(A )
quant_nn.QuantLinear.set_default_quant_desc_weight(A )
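# Typical flow with the two helpers above (de-obfuscated names assumed:
# add_arguments and set_default_quantizers; argparse assumed available):
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)
#   args = parser.parse_args(["--calibrator", "percentile", "--percentile", "99.99"])
#   set_default_quantizers(args)  # must run before the model is constructed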
def lowercase__( A , A , A=False , A=False ):
logger.info('Configuring Model for Quantization' )
logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(A , ['embeddings'] , which='weight' , _disabled=A )
if args.quant_disable:
set_quantizer_by_name(A , [''] , _disabled=A )
if args.quant_disable_keyword:
set_quantizer_by_name(A , args.quant_disable_keyword , _disabled=A )
if args.quant_disable_layer_module:
set_quantizer_by_name(A , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=A )
if args.quant_enable_layer_module:
set_quantizer_by_name(A , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=A )
if args.recalibrate_weights:
recalibrate_weights(A )
if args.fuse_qkv:
fuse_qkv(A , A )
if args.clip_gelu:
clip_gelu(A , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(A )
def lowercase__( A ):
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'''{name:80}: {module}''' )
def lowercase__( A , A ):
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(A )
def lowercase__( A , A ):
def fusea(A , A , A ):
for mod in [qq, qk, qv]:
if not hasattr(A , '_amax' ):
print(' WARNING: NO AMAX BUFFER' )
return
snake_case__ : Optional[int] = qq._amax.detach().item()
snake_case__ : Union[str, Any] = qk._amax.detach().item()
snake_case__ : Dict = qv._amax.detach().item()
snake_case__ : Optional[int] = max(A , A , A )
qq._amax.fill_(A )
qk._amax.fill_(A )
qv._amax.fill_(A )
logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
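# Why the max: per the '--fuse-qkv' help above, Q, K and V must share one scale
# factor, so fusea() rewrites all three amaxes to their maximum, e.g.
# q=4.97, k=5.12, v=3.80 -> all set to 5.12.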
def lowercase__( A , A ):
for name, mod in model.named_modules():
if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
snake_case__ : List[str] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=A )
snake_case__ : Optional[int] = mod._input_quantizer._amax.data.detach().item()
logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def lowercase__( A ):
for name, mod in model.named_modules():
if hasattr(A , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
snake_case__ : str = mod.weight.shape[0]
snake_case__ : List[Any] = mod._weight_quantizer._amax.detach()
snake_case__ : int = torch.ones(A , dtype=amax.dtype , device=amax.device ) * amax
print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def lowercase__( A ):
for name, mod in model.named_modules():
if hasattr(A , '_weight_quantizer' ):
if not hasattr(mod._weight_quantizer , '_amax' ):
print(f'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
snake_case__ : int = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
snake_case__ : int = set(range(len(mod.weight.size() ) ) ) - axis_set
snake_case__ : Optional[Any] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=A , keepdims=A ).detach()
logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
snake_case__ : str = amax
def lowercase__( A , A=2_5 , A=1_8_0 , A=None ):
if ignore is None:
snake_case__ : List[str] = []
elif not isinstance(A , A ):
snake_case__ : Optional[int] = [ignore]
snake_case__ : List[Any] = 0
for name, mod in model.named_modules():
if not hasattr(A , 'weight' ):
continue
snake_case__ : int = max(A , len(A ) )
for name, mod in model.named_modules():
snake_case__ : Optional[int] = getattr(A , '_input_quantizer' , A )
snake_case__ : Tuple = getattr(A , '_weight_quantizer' , A )
if not hasattr(A , 'weight' ):
continue
if type(A ) in ignore:
continue
if [True for s in ignore if type(A ) is str and s in name]:
continue
snake_case__ : Dict = f'''Act:{input_q.extra_repr()}'''
snake_case__ : str = f'''Wgt:{weight_q.extra_repr()}'''
snake_case__ : Any = f'''{name:{name_width}} {act_str} {wgt_str}'''
if len(A ) <= line_width:
logger.info(A )
else:
logger.info(f'''{name:{name_width}} {act_str}''' )
logger.info(f'''{" ":{name_width}} {wgt_str}''' )
def lowercase__( A ):
snake_case__ : Optional[Any] = 0
for name, mod in model.named_modules():
if isinstance(A , pytorch_quantization.nn.TensorQuantizer ):
print(f'''{name:80} {mod}''' )
count += 1
print(f'''{count} TensorQuantizers found in model''' )
def lowercase__( A , A , A , A , A ):
snake_case__ : str = getattr(A , A , A )
if quantizer_mod is not None:
assert hasattr(A , A )
setattr(A , A , A )
else:
logger.warning(f'''{name} has no {quantizer}''' )
def lowercase__( A , A , A="both" , **A ):
snake_case__ : Union[str, Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(A , A , '_input_quantizer' , A , A )
if which in ["weight", "both"]:
set_quantizer(A , A , '_weight_quantizer' , A , A )
logger.info(A )
def lowercase__( A , A , **A ):
for name, mod in model.named_modules():
if hasattr(A , '_input_quantizer' ) or hasattr(A , '_weight_quantizer' ):
for n in names:
if re.search(A , A ):
set_quantizers(A , A , **A )
elif name.endswith('_quantizer' ):
for n in names:
if re.search(A , A ):
snake_case__ : Any = f'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
setattr(A , A , A )
logger.info(A )
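

# Illustrative usage sketch (not part of the original file): a typical
# post-calibration sequence chaining the helpers above. `model` is assumed to
# carry pytorch_quantization TensorQuantizers, and `fuse_qkv` (whose loop body
# appears above), `args.quant_per_tensor`, and `name_width` are assumed to be
# defined earlier in this module.
def _example_finalize_calibration(model, args):
    fuse_qkv(model, args)          # share one amax across the Q/K/V matmul inputs
    clip_gelu(model, maxval=10.0)  # clamp the GELU input amax to a fixed ceiling
    recalibrate_weights(model)     # recompute weight amax from the current weights
    print_model_summary(model)     # log the per-layer quantizer configuration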
| 303 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
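

# Illustrative usage sketch (not part of the original file); assumes network
# access to the google/reformer-crime-and-punishment checkpoint referenced in
# PRETRAINED_VOCAB_FILES_MAP above:
#
#     tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     ids = tokenizer("A sentence to encode.")["input_ids"]
#     text = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(ids))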
| 673 |
"""simple docstring"""
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
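

# Illustrative counterpart (not part of the original file): a minimal server
# the client above could talk to. The filename "mytext.txt" and port 12312 are
# assumptions chosen to mirror the client.
def example_server():
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(1024))  # b"Hello server!"
    with open("mytext.txt", "rb") as in_file:  # the file to send back
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()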
| 673 | 1 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
fire.Fire(save_len_file)
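
# Example invocation (illustrative; the module filename and data layout are
# assumptions — `data_dir` must contain the train/val splits that
# utils.Seq2SeqDataset expects):
#
#     python save_len_file.py t5-small /path/to/data --consider_target=True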
| 114 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
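
def _demo_priority_queue():
    # Illustrative sketch (not part of the original file): `put` inserts an item
    # on first sight and re-prioritizes it on repeat, which is the behaviour
    # multi_a_star below relies on when it re-expands a state.
    pq = PriorityQueue()
    pq.put((0, 0), 5.0)
    pq.put((1, 1), 3.0)
    pq.put((0, 0), 1.0)  # updates the priority of (0, 0) from 5.0 to 1.0
    assert pq.top_show() == (0, 0)
    assert pq.get() == (1.0, (0, 0))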
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)

    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(neighbours, 0, goal, g_function):
                                open_list[j].put(neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,
                    )
                    close_list_anchor.append(get_s)
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_a ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 114 | 1 |
def is_palindrome(num: int) -> bool:
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            num = sum_reverse(num)
            iterations += 1
            if is_palindrome(num):
                break
        else:
            lychrel_nums.append(a)
    return len(lychrel_nums)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 269 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2351563,
"""num_examples""": 10000,
},
{
"""name""": """validation""",
"""num_bytes""": 238418,
"""num_examples""": 1000,
},
] , download_size=3940680 , dataset_size=2589981 , )
} )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 269 | 1 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
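
# Illustrative worked example (not part of the original file): 1 kilowatthour
# is 3,600,000 joule, and 1000 joule is exactly 1 kilojoule.
def _demo_energy_conversion():
    assert energy_conversion("kilowatthour", "joule", 1.0) == 3600000
    assert energy_conversion("joule", "kilojoule", 1000.0) == 1.0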
| 601 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
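

# Minimal sketch (not part of the original file) of how the helpers above
# compose without any checkpointing: fit y = a * x + b on the synthetic data.
def _demo_fit():
    set_seed(42)
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    train_dataloader, _ = dummy_dataloaders()
    accelerator = Accelerator()
    model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)
    train(3, model, train_dataloader, optimizer, accelerator)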
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = """/tmp/accelerate/state_checkpointing"""
SCREAMING_SNAKE_CASE__ = DummyModel()
SCREAMING_SNAKE_CASE__ = torch.optim.Adam(params=model.parameters(), lr=1E-3)
SCREAMING_SNAKE_CASE__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = dummy_dataloaders()
SCREAMING_SNAKE_CASE__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
SCREAMING_SNAKE_CASE__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
assert (
param_device.type == torch.device("""cpu""").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
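
# Note (illustrative): the @require_cuda test above re-launches this same file
# under torchrun, so the __main__ block doubles as the multi-process check:
#
#     torchrun --nproc_per_node=<num_gpus> <this_file>.py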
| 601 | 1 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
@slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
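

# Minimal forward-pass sketch (not part of the original file), using only the
# classes imported above; the small config values mirror XLMModelTester:
def _demo_xlm_forward():
    config = XLMConfig(vocab_size=99, emb_dim=32, n_layers=2, n_heads=4)
    model = XLMModel(config)
    model.eval()
    input_ids = ids_tensor([2, 7], config.vocab_size)
    outputs = model(input_ids)
    assert outputs.last_hidden_state.shape == (2, 7, 32)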
| 22 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowercase ( __magic_name__="" ):
'''simple docstring'''
UpperCAmelCase : Dict = tempfile.mkdtemp()
return os.path.join(__magic_name__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        text = "Hey!"
        agent_type = AgentText(text)

        self.assertEqual(text, agent_type.to_string())
        self.assertEqual(text, agent_type.to_raw())
        self.assertEqual(text, agent_type)
| 679 | 0 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-2
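
# Illustrative variant (not part of the original file): the same pipeline can
# run on CPU by swapping the execution provider and dropping the CUDA session
# options:
#
#     pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
#     )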
| 715 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(f"Splitting {sum(idxs)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _a ( self ,UpperCAmelCase_ ) -> List[str]:
lowercase__ = [t[0] for t in batch]
lowercase__ = [t[1] for t in batch]
assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ )
# Max for paddings
lowercase__ = max(UpperCAmelCase_ )
# Pad token ids
if self.params.mlm:
lowercase__ = self.params.special_tok_ids["pad_token"]
else:
lowercase__ = self.params.special_tok_ids["unk_token"]
lowercase__ = [list(t.astype(UpperCAmelCase_ ) ) + [pad_idx] * (max_seq_len_ - len(UpperCAmelCase_ )) for t in token_ids]
assert len(tk_ ) == len(UpperCAmelCase_ )
assert all(len(UpperCAmelCase_ ) == max_seq_len_ for t in tk_ )
lowercase__ = torch.tensor(tk_ ) # (bs, max_seq_len_)
lowercase__ = torch.tensor(UpperCAmelCase_ ) # (bs)
return tk_t, lg_t
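
# --- Usage sketch (illustration only) ---
# A minimal example of wiring this dataset into a DataLoader, assuming the
# class above keeps its original name `LmSeqsDataset` (its `class` header sits
# above this excerpt). The params object and token ids below are made-up
# stand-ins: any object exposing `max_model_input_size`, `mlm`,
# `special_tok_ids` and `is_master` would work the same way.
from types import SimpleNamespace

from torch.utils.data import DataLoader

toy_params = SimpleNamespace(
    max_model_input_size=128,
    mlm=True,
    special_tok_ids={"cls_token": 101, "sep_token": 102, "pad_token": 0, "unk_token": 100},
    is_master=True,
)
# Every sequence must start with cls_token and end with sep_token, as check() asserts.
toy_data = [np.array([101] + [7] * 30 + [102]), np.array([101] + [8] * 50 + [102])]

toy_dataset = LmSeqsDataset(toy_params, toy_data)
loader = DataLoader(toy_dataset, batch_size=2, collate_fn=toy_dataset.batch_sequences)
token_ids, lengths = next(iter(loader))  # token_ids: (bs, longest-in-batch), right-padded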
| 539 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, 'models/bert/'))
        check_copies.TRANSFORMERS_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, 'src/transformers/models/bert/modeling_bert.py'), os.path.join(self.transformer_dir, 'models/bert/modeling_bert.py'), )
    def tearDown(self):
        check_copies.TRANSFORMERS_PATH = 'src/transformers'
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
        if overwrite_result is not None:
            expected = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, 'new_code.py')
        with open(fname, 'w', newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, 'r') as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead')
        self.assertEqual(code, REFERENCE_CODE)
    def test_copy_consistency(self):
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', REFERENCE_CODE + '\n', )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', re.sub('Bert', 'TestModel', REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}', f'{long_class_name}LMPredictionHead', re.sub('Bert', long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', REFERENCE_CODE, overwrite_result=re.sub('Bert', 'TestModel', REFERENCE_CODE), )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES['README_zh-hans.md']
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme['format_model_list'])
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme['format_model_list'])
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme['format_model_list'])
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
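
# Illustrative driver for the checker exercised above (the path is hypothetical;
# the signature is inferred from the calls in the tests). is_copy_consistent
# scans a file for `# Copied from ...` markers and returns the list of copies
# that drifted from their source; with overwrite=True it rewrites them in place.
diffs = check_copies.is_copy_consistent('models/bert/modeling_bert.py')
if diffs:
    check_copies.is_copy_consistent('models/bert/modeling_bert.py', overwrite=True)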
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mvp'] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
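
# A simplified sketch of what `_LazyModule` above accomplishes (not the real
# implementation, just the idea): attribute access imports the defining
# submodule on first use and caches the result.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported name to the submodule that defines it
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):  # only called when normal lookup fails
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip the import machinery
        return value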
| 50 | 0 |
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Approximate the solution of dy/dx = ode_func(x, y) with the explicit Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
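
    # Quick check of the restored explicit_euler above: for dy/dx = y with
    # y(0) = 1 the exact solution is e**x, so the final value approximates e.
    ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(ys[-1])  # ~2.7048 with step 0.01 (exact value: 2.71828...)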
| 721 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_time_series_transformer'] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 143 | 0 |
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
def lowerCamelCase_ ( *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
"""simple docstring"""
requires_backends(_UpperCamelCase , ['''torch'''] )
def lowerCamelCase_ ( *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(_UpperCamelCase , ['''torch'''] )
def lowerCamelCase_ ( *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
"""simple docstring"""
requires_backends(_UpperCamelCase , ['''torch'''] )
def lowerCamelCase_ ( *_UpperCamelCase , **_UpperCamelCase ) -> int:
"""simple docstring"""
requires_backends(_UpperCamelCase , ['''torch'''] )
def lowerCamelCase_ ( *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
"""simple docstring"""
requires_backends(_UpperCamelCase , ['''torch'''] )
def lowerCamelCase_ ( *_UpperCamelCase , **_UpperCamelCase ) -> int:
"""simple docstring"""
requires_backends(_UpperCamelCase , ['''torch'''] )
def lowerCamelCase_ ( *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
"""simple docstring"""
requires_backends(_UpperCamelCase , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''']
def __init__(self , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCamelCase (cls , *__magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
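
# A simplified sketch (not the actual implementation) of the DummyObject
# metaclass the placeholder classes above rely on: any public attribute access
# on the class raises an ImportError naming the missing backend, while the
# stub __init__/classmethods above do the same for instantiation.
def _requires_backends_sketch(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class _DummyObjectSketch(type):
    def __getattribute__(cls, key):
        if key.startswith("_"):  # keep private lookups (e.g. _backends) working
            return super().__getattribute__(key)
        _requires_backends_sketch(cls, cls._backends)


class _TorchOnlyModelSketch(metaclass=_DummyObjectSketch):
    _backends = ["torch"]

# _TorchOnlyModelSketch.from_pretrained("...")  # -> ImportError mentioning torch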
| 60 |
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """Interpolate and evaluate the polynomial through (x_points, y_points) at x0
    using Neville's algorithm; returns [value, full_table]."""
    n = len(x_points)
    # q[j][i] holds the value at x0 of the polynomial through points j-i+1 .. j
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
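
    # Sanity check: the points below lie on y = 2x, and the interpolating
    # polynomial through collinear points is that line, so the value at
    # x0 = 5.25 is 10.5.
    value, table = neville_interpolate([1, 2, 3, 4, 6], [2, 4, 6, 8, 12], 5.25)
    print(value)  # 10.5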
| 60 | 1 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('Testing', file)
            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f'{module_identifier} is not a module.')
            else:
                result = doctest.testfile(str(Path('..') / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformer_directory = Path('src/transformers')
        files = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(transformer_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformer_directory = Path('src/transformers')
        files = 'tokenization'
        self.analyze_directory(transformer_directory, identifier=files)

    def test_configuration_examples(self):
        transformer_directory = Path('src/transformers')
        files = 'configuration'
        self.analyze_directory(transformer_directory, identifier=files)

    def test_remaining_examples(self):
        transformer_directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(transformer_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
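
# For reference, the kind of in-docstring example the DocTestSuite harness
# above collects and runs (a toy function, not taken from the transformers sources):
def _toy_add(a: int, b: int) -> int:
    """Return the sum of two integers.

    >>> _toy_add(2, 3)
    5
    """
    return a + b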
| 706 |
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {'comet'}
_has_fairseq = importlib.util.find_spec('fairseq') is not None

UNSUPPORTED_ON_WINDOWS = {'code_eval'}
_on_windows = os.name == 'nt'

REQUIRE_TRANSFORMERS = {'bertscore', 'frugalscore', 'perplexity'}
_has_transformers = importlib.util.find_spec('transformers') is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob('./metrics/*/')]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
    def test_load_metric(self, metric_name):
        _ = '[...]'  # elided in the source
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics', metric_name)).module_path)
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
@slow
    def test_load_real_metric(self, metric_name):
        _ = '[...]'  # elided in the source
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics', metric_name)).module_path)
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join('metrics', metric_name), *args, **kwargs)

        with patch('datasets.load_metric') as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt')
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string('sv', '', '')  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict['input_ids']) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('bleurt.score._create_predictor') as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher('bertscore')
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('bert_score.scorer.get_model'), patch(
        'bert_score.scorer.bert_cos_score_idf') as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher('comet')
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint which are supposed to download a model
    with patch('comet.download_model') as mock_download_model:
        mock_download_model.return_value = None
        with patch('comet.load_from_checkpoint') as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join('metrics', 'seqeval'))
    wrong_scheme = 'ERROR'
    error_message = f'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
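
# A toy version of the register-then-patch pattern used above (names here are
# illustrative): a registry maps a name to a context manager that swaps an
# expensive call for a stub while the test runs.
_TOY_PATCHERS = {}


def _register_toy_patcher(name):
    def wrapper(fn):
        _TOY_PATCHERS[name] = contextmanager(fn)
        return fn

    return wrapper


@_register_toy_patcher('slow_api')
def _patch_slow_api(module_name):
    with patch('time.sleep') as mock_sleep:  # stand-in for an expensive call
        mock_sleep.return_value = None
        yield


def _demo_toy_patcher():
    import time

    with _TOY_PATCHERS['slow_api']('any_module'):
        time.sleep(10)  # returns immediately: the real sleep is patched out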
| 397 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wav2vec2'] = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_wav2vec2'] = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_wav2vec2'] = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 |
import math
import unittest
def is_prime(number: int) -> bool:
    """Return True if the given non-negative integer is prime."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class Test(unittest.TestCase):
    def test_primes(self):
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self):
        """simple docstring"""
        with self.assertRaises(AssertionError):
            is_prime(-19)
self.assertFalse(
is_prime(0 ) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
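
# Usage sketch: the 6k +/- 1 loop above means only divisors of the form
# 6k - 1 and 6k + 1 up to sqrt(n) are tried.
def _demo_is_prime():
    return [n for n in range(2, 30) if is_prime(n)]
    # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]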
| 475 | 0 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = 0
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(__lowercase , __lowercase )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : List[Any] = WavaVecaConfig()
__UpperCAmelCase : int = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
__UpperCAmelCase : Tuple = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__lowercase , os.path.join(__lowercase , __lowercase ) )
copyfile(__lowercase , os.path.join(__lowercase , "vocab.json" ) )
__UpperCAmelCase : Any = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : int = WavaVecaFeatureExtractor()
__UpperCAmelCase : str = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
__UpperCAmelCase : Tuple = WavaVecaProcessor(__lowercase , __lowercase )
# save in new folder
processor.save_pretrained(__lowercase )
# drop `processor_class` in tokenizer
with open(os.path.join(__lowercase , __lowercase ) , "r" ) as f:
__UpperCAmelCase : Any = json.load(__lowercase )
config_dict.pop("processor_class" )
with open(os.path.join(__lowercase , __lowercase ) , "w" ) as f:
f.write(json.dumps(__lowercase ) )
__UpperCAmelCase : int = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Optional[int] = WavaVecaFeatureExtractor()
__UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
__UpperCAmelCase : List[str] = WavaVecaProcessor(__lowercase , __lowercase )
# save in new folder
processor.save_pretrained(__lowercase )
# drop `processor_class` in feature extractor
with open(os.path.join(__lowercase , __lowercase ) , "r" ) as f:
__UpperCAmelCase : Optional[int] = json.load(__lowercase )
config_dict.pop("processor_class" )
with open(os.path.join(__lowercase , __lowercase ) , "w" ) as f:
f.write(json.dumps(__lowercase ) )
__UpperCAmelCase : int = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : int = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(__lowercase )
# copy relevant files
copyfile(__lowercase , os.path.join(__lowercase , "vocab.json" ) )
            # create an empty sample processor
with open(os.path.join(__lowercase , __lowercase ) , "w" ) as f:
f.write("{}" )
__UpperCAmelCase : List[Any] = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowercase ):
__UpperCAmelCase : Any = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowercase ):
__UpperCAmelCase : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=__lowercase )
__UpperCAmelCase : str = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=__lowercase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
__UpperCAmelCase : Tuple = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
__UpperCAmelCase : int = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
__UpperCAmelCase : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=__lowercase , use_fast=__lowercase )
__UpperCAmelCase : Tuple = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
try:
AutoConfig.register("custom" , __lowercase )
AutoFeatureExtractor.register(__lowercase , __lowercase )
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase )
AutoProcessor.register(__lowercase , __lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase ):
AutoProcessor.register(__lowercase , __lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCAmelCase : Optional[Any] = CustomFeatureExtractor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase : int = os.path.join(__lowercase , "vocab.txt" )
with open(__lowercase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__UpperCAmelCase : Union[str, Any] = CustomTokenizer(__lowercase )
__UpperCAmelCase : Optional[Any] = CustomProcessor(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__lowercase )
__UpperCAmelCase : Any = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = False
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = False
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = """AutoFeatureExtractor"""
SCREAMING_SNAKE_CASE = """AutoTokenizer"""
SCREAMING_SNAKE_CASE = False
try:
AutoConfig.register("custom" , __lowercase )
AutoFeatureExtractor.register(__lowercase , __lowercase )
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase )
AutoProcessor.register(__lowercase , __lowercase )
# If remote code is not set, the default is to use local classes.
__UpperCAmelCase : List[str] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
__UpperCAmelCase : List[Any] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=__lowercase )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
__UpperCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=__lowercase )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : int = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : Any = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = TOKEN
HfFolder.save_token(__lowercase )
@classmethod
def lowerCamelCase_ ( cls : int ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
__UpperCAmelCase : Dict = WavaVecaProcessor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__lowercase , "test-processor" ) , push_to_hub=__lowercase , use_auth_token=self._token )
__UpperCAmelCase : Any = WavaVecaProcessor.from_pretrained(f"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__lowercase , getattr(new_processor.feature_extractor , __lowercase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : List[str] = WavaVecaProcessor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__lowercase , "test-processor-org" ) , push_to_hub=__lowercase , use_auth_token=self._token , organization="valid_org" , )
__UpperCAmelCase : int = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__lowercase , getattr(new_processor.feature_extractor , __lowercase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
__UpperCAmelCase : int = CustomFeatureExtractor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase : Union[str, Any] = os.path.join(__lowercase , "vocab.txt" )
with open(__lowercase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__UpperCAmelCase : Tuple = CustomTokenizer(__lowercase )
__UpperCAmelCase : List[str] = CustomProcessor(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"{USER}/test-dynamic-processor" , token=self._token )
__UpperCAmelCase : List[Any] = Repository(__lowercase , clone_from=f"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(__lowercase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__lowercase , "tokenizer_config.json" ) ) as f:
__UpperCAmelCase : Optional[Any] = json.load(__lowercase )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__lowercase , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__lowercase , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__lowercase , "custom_processing.py" ) ) )
repo.push_to_hub()
__UpperCAmelCase : Dict = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor" , trust_remote_code=__lowercase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
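# A hedged recap of the registration pattern the tests above exercise; defined
# but never called, so importing this module stays side-effect free. The
# Custom* classes are the fixtures imported at the top of this file.
def _registration_sketch():
    AutoConfig.register("custom", CustomConfig)
    AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
    AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
    AutoProcessor.register(CustomConfig, CustomProcessor)
    # AutoProcessor.from_pretrained(...) now resolves to CustomProcessor for any
    # checkpoint whose config deserializes to CustomConfig (remember to clean up
    # the *_MAPPING._extra_content registries afterwards, as the tests above do).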
| 715 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase_ ( *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Tuple ):
"""simple docstring"""
pass
def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase_ ( self : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] ):
"""simple docstring"""
__UpperCAmelCase : int = DepthEstimationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase : Tuple = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , UpperCAmelCase_ )
import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
__UpperCAmelCase : Dict = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , UpperCAmelCase_ , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
pass
@slow
@require_torch
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase : List[str] = "Intel/dpt-large"
__UpperCAmelCase : Optional[int] = pipeline("depth-estimation" , model=UpperCAmelCase_ )
__UpperCAmelCase : Any = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
__UpperCAmelCase : str = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
        # It is highly irregular to have no small tests.
        self.skipTest("There is no hf-internal-testing tiny model for either GLPN or DPT" )
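# A hedged, minimal usage sketch of the depth-estimation pipeline tested above;
# defined but never called. "Intel/dpt-large" is the checkpoint the slow test loads.
def _depth_estimation_usage_sketch():
    from transformers import pipeline
    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    result["depth"].save("depth.png")  # PIL image of the predicted depth map
    return result["predicted_depth"]   # raw torch.Tensor output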
| 329 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( __A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase : Dict =ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
lowercase : Optional[Any] =1_2_8
elif "12-12" in model_name:
lowercase : Union[str, Any] =1_2
lowercase : Tuple =1_2
elif "14-14" in model_name:
lowercase : Optional[Any] =1_4
lowercase : List[str] =1_4
elif "16-16" in model_name:
lowercase : Union[str, Any] =1_6
lowercase : Optional[int] =1_6
else:
raise ValueError('''Model not supported''' )
lowercase : List[Any] ='''huggingface/label-files'''
if "speech-commands" in model_name:
lowercase : str =3_5
lowercase : Union[str, Any] ='''speech-commands-v2-id2label.json'''
else:
lowercase : Optional[int] =5_2_7
lowercase : str ='''audioset-id2label.json'''
lowercase : List[str] =json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) )
lowercase : Tuple ={int(__A ): v for k, v in idalabel.items()}
lowercase : Dict =idalabel
lowercase : List[str] ={v: k for k, v in idalabel.items()}
return config
def lowercase_ ( __A : Dict ) -> List[str]:
"""simple docstring"""
if "module.v" in name:
lowercase : int =name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
if "cls_token" in name:
lowercase : Tuple =name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "dist_token" in name:
lowercase : int =name.replace('''dist_token''' , '''embeddings.distillation_token''' )
if "pos_embed" in name:
lowercase : Optional[int] =name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowercase : Optional[Any] =name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
# transformer blocks
if "blocks" in name:
lowercase : List[Any] =name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
lowercase : int =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowercase : Dict =name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowercase : List[Any] =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowercase : List[str] =name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowercase : int =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowercase : str =name.replace('''mlp.fc2''' , '''output.dense''' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowercase : Optional[int] =name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
# classifier head
if "module.mlp_head.0" in name:
lowercase : Any =name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
if "module.mlp_head.1" in name:
lowercase : Dict =name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
return name
def lowercase_ ( __A : str , __A : List[str] ) -> Tuple:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase : Dict =orig_state_dict.pop(__A )
if "qkv" in key:
lowercase : Union[str, Any] =key.split('''.''' )
lowercase : Any =int(key_split[3] )
lowercase : int =config.hidden_size
if "weight" in key:
lowercase : str =val[:dim, :]
lowercase : int =val[dim : dim * 2, :]
lowercase : Tuple =val[-dim:, :]
else:
lowercase : Dict =val[:dim]
lowercase : str =val[dim : dim * 2]
lowercase : Optional[int] =val[-dim:]
else:
lowercase : List[Any] =val
return orig_state_dict
def lowercase_ ( __A : Dict ) -> List[str]:
"""simple docstring"""
lowercase : str =[
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(__A , __A )
@torch.no_grad()
def lowercase_ ( __A : str , __A : Optional[Any] , __A : Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
lowercase : Any =get_audio_spectrogram_transformer_config(__A )
lowercase : List[Any] ={
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
lowercase : Dict =model_name_to_url[model_name]
lowercase : Optional[Any] =torch.hub.load_state_dict_from_url(__A , map_location='''cpu''' )
# remove some keys
remove_keys(__A )
# rename some keys
lowercase : Optional[Any] =convert_state_dict(__A , __A )
# load 🤗 model
lowercase : str =ASTForAudioClassification(__A )
model.eval()
model.load_state_dict(__A )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
lowercase : List[Any] =-4.2677393 if '''speech-commands''' not in model_name else -6.845978
lowercase : Union[str, Any] =4.5689974 if '''speech-commands''' not in model_name else 5.5654526
lowercase : Optional[int] =1_0_2_4 if '''speech-commands''' not in model_name else 1_2_8
lowercase : List[Any] =ASTFeatureExtractor(mean=__A , std=__A , max_length=__A )
if "speech-commands" in model_name:
lowercase : Tuple =load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
lowercase : str =dataset[0]['''audio''']['''array''']
else:
lowercase : int =hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
lowercase , lowercase : Optional[int] =torchaudio.load(__A )
lowercase : Any =waveform.squeeze().numpy()
lowercase : Optional[int] =feature_extractor(__A , sampling_rate=1_6_0_0_0 , return_tensors='''pt''' )
# forward pass
lowercase : Optional[int] =model(**__A )
lowercase : List[Any] =outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowercase : Union[str, Any] =torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowercase : str =torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowercase : Dict =torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowercase : List[str] =torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowercase : str =torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowercase : List[Any] =torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowercase : Any =torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowercase : Optional[Any] =torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError('''Unknown model name''' )
if not torch.allclose(logits[0, :3] , __A , atol=1E-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(__A ).mkdir(exist_ok=__A )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__A )
print(F'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(__A )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(F'MIT/{model_name}' )
feature_extractor.push_to_hub(F'MIT/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
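# Hypothetical command line for the conversion script above (the script filename
# and dump folder are placeholders, not taken from this file):
#
#     python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#         --model_name ast-finetuned-audioset-10-10-0.4593 \
#         --pytorch_dump_folder_path ./ast-converted \
#         --push_to_hub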
| 94 |
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def lowerCamelCase_ ( A_ , A_ ):
assert isinstance(A_ , A_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCamelCase_ ( A_ , A_ , A_ , A_ ):
__lowerCamelCase = tmp_path / '''cache'''
__lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCamelCase = SqlDatasetReader(
'''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=A_ , keep_in_memory=A_ ).read()
_check_sql_dataset(A_ , A_ )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCamelCase_ ( A_ , A_ , A_ , A_ ):
__lowerCamelCase = tmp_path / '''cache'''
__lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__lowerCamelCase = features.copy() if features else default_expected_features
__lowerCamelCase = (
Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCamelCase = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=A_ , cache_dir=A_ ).read()
_check_sql_dataset(A_ , A_ )
def lowerCamelCase_ ( A_ ):
    with contextlib.closing(sqlite3.connect(A_ ) ) as con:
__lowerCamelCase = con.cursor()
cur.execute('''SELECT * FROM dataset''' )
for row in cur:
yield row
@require_sqlalchemy
def lowerCamelCase_ ( A_ , A_ , A_ ):
__lowerCamelCase = tmp_path / '''cache'''
__lowerCamelCase = os.path.join(A_ , '''tmp.sql''' )
__lowerCamelCase = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=A_ ).read()
SqlDatasetWriter(A_ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write()
__lowerCamelCase = iter_sql_file(A_ )
__lowerCamelCase = iter_sql_file(A_ )
for rowa, rowa in zip(A_ , A_ ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase_ ( A_ , A_ , A_ ):
__lowerCamelCase = tmp_path / '''cache'''
__lowerCamelCase = os.path.join(A_ , '''tmp.sql''' )
__lowerCamelCase = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=A_ ).read()
SqlDatasetWriter(A_ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write()
__lowerCamelCase = iter_sql_file(A_ )
__lowerCamelCase = iter_sql_file(A_ )
for rowa, rowa in zip(A_ , A_ ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase_ ( A_ , A_ , A_ ):
__lowerCamelCase = tmp_path / '''cache'''
__lowerCamelCase = os.path.join(A_ , '''tmp.sql''' )
__lowerCamelCase = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=A_ ).read()
with pytest.raises(A_ ):
SqlDatasetWriter(A_ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
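# A hedged sketch (defined, never called) of the same round trip through the
# public Dataset API instead of the internal reader/writer classes; I'm assuming
# `Dataset.from_sql` / `Dataset.to_sql`, their documented counterparts.
def _sql_round_trip_sketch(sqlite_path, output_sqlite_path):
    from datasets import Dataset
    ds = Dataset.from_sql("dataset", "sqlite:///" + sqlite_path)
    ds.to_sql("dataset", "sqlite:///" + output_sqlite_path)
    return ds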
| 316 | 0 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized model (and matching tokenizer) built from ``config_name``."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
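# Hypothetical CLI usage via python-fire (the script filename and checkpoint
# are placeholders):
#
#     python save_randomly_initialized_model.py t5-small ./t5-small-random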
| 312 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowerCamelCase_ ( enum.Enum ):
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
@add_end_docstrings(__a )
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self : List[str] , *_A : List[Any] , **_A : List[str] ):
'''simple docstring'''
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
UpperCAmelCase__ : Any = None
if self.model.config.prefix is not None:
UpperCAmelCase__ : int = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
UpperCAmelCase__ : List[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self._sanitize_parameters(prefix=_A , **self._forward_params )
UpperCAmelCase__ : Any = {**self._preprocess_params, **preprocess_params}
UpperCAmelCase__ : Optional[int] = {**self._forward_params, **forward_params}
def lowercase_ ( self : Dict , _A : int=None , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Optional[int]=None , _A : Dict=None , _A : str=None , _A : List[str]=None , _A : List[str]=None , **_A : int , ):
'''simple docstring'''
UpperCAmelCase__ : Any = {}
if prefix is not None:
UpperCAmelCase__ : Optional[int] = prefix
if prefix:
UpperCAmelCase__ : Any = self.tokenizer(
_A , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
UpperCAmelCase__ : Dict = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
UpperCAmelCase__ : Optional[int] = handle_long_generation
preprocess_params.update(_A )
UpperCAmelCase__ : Union[str, Any] = generate_kwargs
UpperCAmelCase__ : Optional[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
UpperCAmelCase__ : Optional[int] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
UpperCAmelCase__ : Union[str, Any] = ReturnType.TENSORS
if return_type is not None:
UpperCAmelCase__ : Optional[int] = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase__ : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase__ : Optional[int] = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
UpperCAmelCase__ : str = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowercase_ ( self : Tuple , *_A : List[Any] , **_A : Optional[int] ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_A , **_A )
def __call__( self : List[Any] , _A : List[Any] , **_A : List[Any] ):
'''simple docstring'''
return super().__call__(_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : List[str]="" , _A : Optional[int]=None , **_A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.tokenizer(
prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
UpperCAmelCase__ : Union[str, Any] = prompt_text
if handle_long_generation == "hole":
UpperCAmelCase__ : Any = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
UpperCAmelCase__ : str = generate_kwargs['''max_new_tokens''']
else:
UpperCAmelCase__ : Tuple = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
UpperCAmelCase__ : Optional[Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
UpperCAmelCase__ : Optional[int] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
UpperCAmelCase__ : List[Any] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def lowercase_ ( self : List[Any] , _A : Any , **_A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = model_inputs['''input_ids''']
UpperCAmelCase__ : Tuple = model_inputs.get('''attention_mask''' , _A )
# Allow empty prompts
if input_ids.shape[1] == 0:
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : int = None
UpperCAmelCase__ : Tuple = 1
else:
UpperCAmelCase__ : Union[str, Any] = input_ids.shape[0]
UpperCAmelCase__ : int = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
UpperCAmelCase__ : Dict = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
UpperCAmelCase__ : Tuple = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
UpperCAmelCase__ : Optional[Any] = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
UpperCAmelCase__ : List[Any] = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
UpperCAmelCase__ : Optional[Any] = self.model.generate(input_ids=_A , attention_mask=_A , **_A )
UpperCAmelCase__ : Any = generated_sequence.shape[0]
if self.framework == "pt":
UpperCAmelCase__ : Optional[int] = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
UpperCAmelCase__ : List[Any] = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowercase_ ( self : Any , _A : Dict , _A : Optional[int]=ReturnType.FULL_TEXT , _A : int=True ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = model_outputs['''generated_sequence'''][0]
UpperCAmelCase__ : Tuple = model_outputs['''input_ids''']
UpperCAmelCase__ : List[str] = model_outputs['''prompt_text''']
UpperCAmelCase__ : Any = generated_sequence.numpy().tolist()
UpperCAmelCase__ : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
UpperCAmelCase__ : List[str] = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
UpperCAmelCase__ : List[str] = self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
UpperCAmelCase__ : Union[str, Any] = 0
else:
UpperCAmelCase__ : List[str] = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) )
if return_type == ReturnType.FULL_TEXT:
UpperCAmelCase__ : Dict = prompt_text + text[prompt_length:]
else:
UpperCAmelCase__ : List[str] = text[prompt_length:]
UpperCAmelCase__ : Any = {'''generated_text''': all_text}
records.append(_A )
return records
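def _text_generation_usage_sketch():
    # A hedged usage sketch (defined, never called) of this pipeline through the
    # public `pipeline` factory; "gpt2" is only an example checkpoint.
    from transformers import pipeline
    generator = pipeline("text-generation", model="gpt2")
    outputs = generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)
    return outputs[0]["generated_text"]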
| 312 | 1 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class lowerCAmelCase__ ( _lowerCamelCase ):
A_ : int = ['input_features', 'attention_mask']
def __init__( self : Tuple , __UpperCamelCase : List[Any]=80 , __UpperCamelCase : int=16_000 , __UpperCamelCase : Tuple=0.0 , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : int=25 , __UpperCamelCase : str="hamming_window" , __UpperCamelCase : List[str]=3_2_7_6_8.0 , __UpperCamelCase : Optional[int]=0.9_7 , __UpperCamelCase : List[Any]=1.0 , __UpperCamelCase : int=True , __UpperCamelCase : str=True , __UpperCamelCase : Optional[Any]=False , **__UpperCamelCase : Union[str, Any] , ) -> Tuple:
super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase )
A = feature_size
A = sampling_rate
A = padding_value
A = hop_length
A = win_length
A = frame_signal_scale
A = preemphasis_coeff
A = mel_floor
A = normalize_means
A = normalize_vars
A = win_function
A = return_attention_mask
A = win_length * sampling_rate // 1_000
A = hop_length * sampling_rate // 1_000
A = optimal_fft_length(self.sample_size )
A = (self.n_fft // 2) + 1
def __UpperCamelCase ( self : Tuple , __UpperCamelCase : np.array ) -> np.ndarray:
if self.win_function == "hamming_window":
A = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase )
else:
A = window_function(window_length=self.sample_size , name=self.win_function )
A = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
A = spectrogram(
one_waveform * self.frame_signal_scale , window=__UpperCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__UpperCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=__UpperCamelCase , mel_floor=self.mel_floor , log_mel='log' , )
return msfc_features.T
def __UpperCamelCase ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Tuple ) -> Optional[int]:
# make sure we normalize float32 arrays
if self.normalize_means:
A = x[:input_length].mean(axis=0 )
A = np.subtract(__UpperCamelCase , __UpperCamelCase )
if self.normalize_vars:
A = x[:input_length].std(axis=0 )
A = np.divide(__UpperCamelCase , __UpperCamelCase )
if input_length < x.shape[0]:
A = padding_value
# make sure array is in float32
A = x.astype(np.floataa )
return x
def __UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : List[np.ndarray] , __UpperCamelCase : Optional[np.ndarray] = None ) -> List[np.ndarray]:
A = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__UpperCamelCase , __UpperCamelCase , self.padding_value ) for x, n in zip(__UpperCamelCase , __UpperCamelCase )]
def __call__( self : Union[str, Any] , __UpperCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __UpperCamelCase : Union[bool, str, PaddingStrategy] = False , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : List[str] , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
A = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
A = is_batched_numpy or (
isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ):
A = np.asarray(__UpperCamelCase , dtype=np.floataa )
elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A = [raw_speech]
# extract fbank features
A = [self._extract_mfsc_features(__UpperCamelCase ) for one_waveform in raw_speech]
# convert into correct format for padding
A = BatchFeature({'input_features': features} )
A = self.pad(
__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
# make sure list is in array format
A = padded_inputs.get('input_features' )
if isinstance(input_features[0] , __UpperCamelCase ):
A = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in input_features]
A = padded_inputs.get('attention_mask' )
if attention_mask is not None:
A = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
A = (
np.array(__UpperCamelCase , dtype=np.intaa )
if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
A = self.normalize(
padded_inputs['input_features'] , attention_mask=__UpperCamelCase )
if return_tensors is not None:
A = padded_inputs.convert_to_tensors(__UpperCamelCase )
        return padded_inputs
| 106 |
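# A hedged sketch of calling the feature extractor defined just above on
# synthetic audio (comments only; the obfuscated class name is taken verbatim):
#
#     import numpy as np
#
#     extractor = lowerCAmelCase__()  # defaults: 80 mel bins, 16 kHz input
#     waveform = np.random.randn(16_000).astype(np.float32)  # one second of noise
#     batch = extractor(waveform, sampling_rate=16_000, return_tensors="np")
#     batch["input_features"][0].shape  # -> (num_frames, 80)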
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : List[str] = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
_lowercase : Tuple = True if 'large' in model_name or 'huge' in model_name else False
_lowercase : Any = True if 'large' in model_name or 'huge' in model_name else False
_lowercase : Dict = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
_lowercase : Any = [3, 3, 3, 3]
_lowercase : Any = [5, 5, 5, 5]
elif "fl4" in model_name:
_lowercase : Dict = [4, 4, 4, 4]
_lowercase : Tuple = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
_lowercase : str = [3, 3, 3, 3]
if "lrf" in model_name:
_lowercase : Optional[int] = [3, 3, 3, 3]
else:
_lowercase : Dict = [2, 2, 2, 2]
if "tiny" in model_name:
_lowercase : List[str] = 96
elif "small" in model_name:
_lowercase : Dict = 96
elif "base" in model_name:
_lowercase : Optional[int] = 128
elif "large" in model_name:
_lowercase : List[Any] = 192
elif "xlarge" in model_name:
_lowercase : Optional[Any] = 256
elif "huge" in model_name:
_lowercase : Dict = 352
# set label information
_lowercase : int = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
_lowercase : str = 'imagenet-22k-id2label.json'
else:
_lowercase : Tuple = 'imagenet-1k-id2label.json'
_lowercase : Union[str, Any] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
_lowercase : int = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
_lowercase : Any = {v: k for k, v in idalabel.items()}
_lowercase : Optional[Any] = FocalNetConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , focal_levels=lowerCamelCase_ , focal_windows=lowerCamelCase_ , use_conv_embed=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , use_post_layernorm=lowerCamelCase_ , use_layerscale=lowerCamelCase_ , )
return config
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
if "patch_embed.proj" in name:
_lowercase : Optional[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowercase : str = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
_lowercase : Any = 'encoder.' + name
if "encoder.layers" in name:
_lowercase : int = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
_lowercase : Tuple = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
_lowercase : str = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
_lowercase : List[str] = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
_lowercase : int = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
_lowercase : Any = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
_lowercase : Any = 'layernorm.weight'
if name == "norm.bias":
_lowercase : Tuple = 'layernorm.bias'
if "head" in name:
_lowercase : Optional[int] = name.replace('head' , 'classifier' )
else:
_lowercase : Optional[int] = 'focalnet.' + name
return name
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ) -> str:
# fmt: off
_lowercase : Dict = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
_lowercase : Dict = model_name_to_url[model_name]
print('Checkpoint URL: ' , lowerCamelCase_ )
_lowercase : List[str] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
_lowercase : Dict = state_dict.pop(lowerCamelCase_ )
_lowercase : Optional[int] = val
_lowercase : Union[str, Any] = get_focalnet_config(lowerCamelCase_ )
_lowercase : Optional[Any] = FocalNetForImageClassification(lowerCamelCase_ )
model.eval()
# load state dict
model.load_state_dict(lowerCamelCase_ )
# verify conversion
_lowercase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase : Any = BitImageProcessor(
do_resize=lowerCamelCase_ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase_ , crop_size=224 , do_normalize=lowerCamelCase_ , image_mean=lowerCamelCase_ , image_std=lowerCamelCase_ , )
_lowercase : List[str] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
_lowercase : List[Any] = processor(images=lowerCamelCase_ , return_tensors='pt' )
_lowercase : str = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
_lowercase : List[str] = image_transforms(lowerCamelCase_ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase_ , atol=1e-4 )
_lowercase : Dict = model(**lowerCamelCase_ )
_lowercase : int = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
_lowercase : Optional[Any] = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
_lowercase : int = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
_lowercase : str = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
_lowercase : Any = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
_lowercase : List[Any] = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
_lowercase : int = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
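# Hypothetical command line for the script above (paths are placeholders):
#
#     python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub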
| 89 | 0 |
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Query the GitHub Actions API and fail if any targeted runner is offline."""
    offline_runners = []
    cmd = (
        f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
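# Hypothetical invocation (the script name and runner names are placeholders):
#
#     python check_offline_runners.py \
#         --target_runners single-gpu-ci-runner,multi-gpu-ci-runner \
#         --token "$GITHUB_TOKEN"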
| 525 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
HIDEN_SIZE_MAPPING = {
"""169M""": 768,
"""430M""": 1024,
"""1B5""": 2048,
"""3B""": 2560,
"""7B""": 4096,
"""14B""": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
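# For illustration (not part of the original script), the renaming above maps e.g.
#   "emb.weight"              -> "rwkv.embeddings.weight"
#   "blocks.3.att.key.weight" -> "rwkv.blocks.3.attention.key.weight"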
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. Build the tokenizer
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)
    # 3. Download the model file then convert the state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
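# Hedged usage sketch (script name, repo and checkpoint file are illustrative placeholders):
#
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#       --output_dir ./rwkv-4-169m-pile --size 169M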
| 525 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865, num_mel_bins=80,
        encoder_layers=6, encoder_attention_heads=4,
        decoder_layers=6, decoder_attention_heads=4,
        decoder_ffn_dim=1536, encoder_ffn_dim=1536,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=256,
        dropout=0.0, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, scale_embedding=False,
        max_source_positions=1500, max_target_positions=448,
        pad_token_id=50256, bos_token_id=50256, eos_token_id=50256,
        suppress_tokens=None, begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False, classifier_proj_size=256,
        apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    def generate_dummy_inputs(
        self, preprocessor, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
        sampling_rate=22050, time_duration=5.0, frequency=220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size,
            framework=framework, sampling_rate=sampling_rate, time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
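# A minimal usage sketch (assuming the standard `transformers` API; not part of this file):
#
#   from transformers import WhisperConfig, WhisperModel
#   config = WhisperConfig()      # defaults above: d_model=256, 6 encoder + 6 decoder layers
#   model = WhisperModel(config)  # randomly initialised model with that configuration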
| 105 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict[str, list[int]] = {}  # keyword -> list of occurrence start indices
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
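
    # Small usage sketch (illustrative, not part of the original module):
    # report every start index at which a keyword occurs.
    automaton = Automaton(["what", "hat", "ver", "er"])
    print(automaton.search_in("whatever, err ... , wherever"))
    # -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}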
| 460 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3,
        last_hidden_size=640, num_attention_heads=4, hidden_act="silu",
        conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1,
        initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def _SCREAMING_SNAKE_CASE ( self : str )-> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> int:
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _SCREAMING_SNAKE_CASE ( self : Any )-> Any:
"""simple docstring"""
pass
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
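# Hedged usage sketch (standard `transformers` API; image path is a placeholder, not from the tests):
#
#   from PIL import Image
#   from transformers import MobileViTImageProcessor, MobileViTForImageClassification
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
#   model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")
#   inputs = processor(images=Image.open("cats.png"), return_tensors="pt")
#   logits = model(**inputs).logits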
| 556 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 556 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):  # class name reconstructed from context; the original identifier was mangled
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self, image: np.ndarray, size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self, image: ImageInput, do_resize=None, size=None, resample=None,
        do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None,
        do_normalize=None, image_mean=None, image_std=None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self, videos: ImageInput, do_resize=None, size=None, resample=None,
        do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None,
        do_normalize=None, image_mean=None, image_std=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample,
                    do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale,
                    rescale_factor=rescale_factor, do_normalize=do_normalize,
                    image_mean=image_mean, image_std=image_std, data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 578 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
def __init__( self , _a , _a=2 , _a=True , _a=False , _a=10 , _a=3 , _a=32 * 8 , _a=32 * 8 , _a=4 , _a=64 , ) -> List[str]:
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_auxiliary_loss
lowerCAmelCase_ = num_queries
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = min_size
lowerCAmelCase_ = max_size
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = hidden_dim
lowerCAmelCase_ = hidden_dim
    def prepare_config_and_inputs(self):
lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_a )
lowerCAmelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_a )
lowerCAmelCase_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_a ) > 0.5
).float()
lowerCAmelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_a ) > 0.5).long()
lowerCAmelCase_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        lowerCAmelCase_ = Mask2FormerConfig(
hidden_size=self.hidden_dim , )
lowerCAmelCase_ = self.num_queries
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = [1, 1, 1, 1]
lowerCAmelCase_ = self.num_channels
lowerCAmelCase_ = 64
lowerCAmelCase_ = 128
lowerCAmelCase_ = self.hidden_dim
lowerCAmelCase_ = self.hidden_dim
lowerCAmelCase_ = self.hidden_dim
return config
    def prepare_config_and_inputs_for_common(self):
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
    def check_output_hidden_state(self, output, config):
lowerCAmelCase_ = output.encoder_hidden_states
lowerCAmelCase_ = output.pixel_decoder_hidden_states
lowerCAmelCase_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) , config.decoder_layers )
    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
with torch.no_grad():
            lowerCAmelCase_ = Mask2FormerModel(config=_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(pixel_values=_a , pixel_mask=_a )
lowerCAmelCase_ = model(_a , output_hidden_states=_a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a , _a )
    def create_and_check_mask2former_instance_segmentation_head_model(self, config, pixel_values, pixel_mask, mask_labels, class_labels):
        lowerCAmelCase_ = Mask2FormerForUniversalSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(_a ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase_ = model(pixel_values=_a , pixel_mask=_a )
lowerCAmelCase_ = model(_a )
comm_check_on_output(_a )
lowerCAmelCase_ = model(
pixel_values=_a , pixel_mask=_a , mask_labels=_a , class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)
def __a ( self ) -> List[str]:
self.config_tester.run_common_tests()
def __a ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(_a , **_a , output_hidden_states=_a )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*_a )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def __a ( self ) -> Any:
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def __a ( self ) -> Optional[int]:
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def __a ( self ) -> str:
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def __a ( self ) -> List[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __a ( self ) -> Any:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __a ( self ) -> List[str]:
pass
def __a ( self ) -> Any:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(_a )
lowerCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ = [*signature.parameters.keys()]
lowerCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _a )
@slow
def __a ( self ) -> int:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            lowerCAmelCase_ = Mask2FormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = (self.model_tester.min_size,) * 2
lowerCAmelCase_ = {
"pixel_values": torch.randn((2, 3, *size) , device=_a ),
"mask_labels": torch.randn((2, 10, *size) , device=_a ),
"class_labels": torch.zeros(2 , 10 , device=_a ).long(),
}
lowerCAmelCase_ = self.model_tester.get_config()
        lowerCAmelCase_ = Mask2FormerForUniversalSegmentation(_a ).to(_a )
lowerCAmelCase_ = model(**_a )
self.assertTrue(outputs.loss is not None )
def __a ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(_a , **_a , output_hidden_states=_a )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(_a ).to(_a )
lowerCAmelCase_ = model(**_a , output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def __a ( self ) -> List[str]:
if not self.model_tester.is_training:
return
lowerCAmelCase_ = self.all_model_classes[1]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ = model_class(_a )
model.to(_a )
model.train()
lowerCAmelCase_ = model(_a , mask_labels=_a , class_labels=_a ).loss
loss.backward()
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.all_model_classes[1]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(_a ).to(_a )
model.train()
lowerCAmelCase_ = model(_a , mask_labels=_a , class_labels=_a )
lowerCAmelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowerCAmelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def __a ( self ) -> int:
        lowerCAmelCase_ = Mask2FormerModel.from_pretrained(self.model_checkpoints ).to(_a )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(_a , return_tensors="pt" ).to(_a )
lowerCAmelCase_ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a , (1, 3, 384, 384) )
with torch.no_grad():
lowerCAmelCase_ = model(**_a )
lowerCAmelCase_ = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
lowerCAmelCase_ = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
lowerCAmelCase_ = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _a , atol=_a ) )
def __a ( self ) -> str:
        lowerCAmelCase_ = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(_a , return_tensors="pt" ).to(_a )
lowerCAmelCase_ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a , (1, 3, 384, 384) )
with torch.no_grad():
lowerCAmelCase_ = model(**_a )
# masks_queries_logits
lowerCAmelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowerCAmelCase_ = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
lowerCAmelCase_ = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) )
# class_queries_logits
lowerCAmelCase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase_ = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) )
def __a ( self ) -> Tuple:
        lowerCAmelCase_ = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="pt", )
lowerCAmelCase_ = inputs["pixel_values"].to(_a )
lowerCAmelCase_ = [el.to(_a ) for el in inputs["mask_labels"]]
lowerCAmelCase_ = [el.to(_a ) for el in inputs["class_labels"]]
with torch.no_grad():
lowerCAmelCase_ = model(**_a )
self.assertTrue(outputs.loss is not None )
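# Hedged usage sketch (standard `transformers` API; the image path is a placeholder, not from the tests):
#
#   from PIL import Image
#   from transformers import Mask2FormerImageProcessor, Mask2FormerForUniversalSegmentation
#   processor = Mask2FormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   inputs = processor(images=Image.open("cats.png"), return_tensors="pt")
#   outputs = model(**inputs)
#   prediction = processor.post_process_instance_segmentation(outputs)[0]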
| 122 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self, processed_features, padding=True, max_length=None, truncation=False,
        pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self, processed_features, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None, return_attention_mask=None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
def UpperCamelCase ( self , A__ , A__ = None , A__ = None , A__ = None , ) -> Union[str, Any]:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )
snake_case = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
snake_case = len(A__ ) > max_length
if needs_to_be_truncated:
snake_case = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
snake_case = processed_features['''attention_mask'''][:max_length]
return processed_features
def UpperCamelCase ( self , A__=False , A__=None ) -> Union[str, Any]:
# Get padding strategy
if padding is not False:
if padding is True:
snake_case = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(A__ , A__ ):
snake_case = PaddingStrategy(A__ )
elif isinstance(A__ , A__ ):
snake_case = padding
else:
snake_case = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )
return padding_strategy
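The left/right padding branch above boils down to a single np.pad call per feature array. A minimal standalone sketch of that behavior on a 1-D feature, assuming a padding value of 0.0 (the function and variable names here are illustrative, not part of the class above):

import numpy as np

def pad_sequence(features: np.ndarray, max_length: int, padding_value: float = 0.0,
                 padding_side: str = "right") -> np.ndarray:
    """Pad a 1-D feature array to `max_length`, mirroring the branch logic above."""
    difference = max_length - len(features)
    if difference <= 0:
        return features
    pad_width = (0, difference) if padding_side == "right" else (difference, 0)
    return np.pad(features, pad_width, "constant", constant_values=padding_value)

print(pad_sequence(np.array([0.5, 1.5, 2.5]), max_length=5))                       # [0.5 1.5 2.5 0.  0. ]
print(pad_sequence(np.array([0.5, 1.5, 2.5]), max_length=5, padding_side="left"))  # [0.  0.  0.5 1.5 2.5]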
| 44 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original block number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    """Prepare the COCO cats image used to verify the converted model."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
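The block-offset renaming is easiest to see on a concrete key. A quick check using the replace_key_with_offset function defined above, on a made-up state-dict key (the key string is hypothetical, chosen only to exercise the function):

# assumes replace_key_with_offset from the script above is in scope
key = "poolformer.encoder.2.3.mlp.fc1.weight"  # block 2, layer 3
print(replace_key_with_offset(key, 1, "mlp.fc1", "output.conv1"))
# poolformer.encoder.block.1.3.output.conv1.weight  -> block index shifted down by the offset of 1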
| 44 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
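The qkv handling in convert_state_dict follows the usual fused-projection layout: the first third of the rows is the query projection, the middle third the key, the last third the value. A toy check of that slicing (toy tensor, not a real checkpoint):

import torch

dim = 4  # stand-in for config.hidden_size
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)

query = qkv_weight[:dim, :]          # first third of the rows
key = qkv_weight[dim : dim * 2, :]   # middle third
value = qkv_weight[-dim:, :]         # last third
assert torch.equal(torch.cat([query, key, value]), qkv_weight)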
| 261 |
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedily select a maximum set of mutually compatible activities.

    Assumes the activities are sorted by finish time.
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
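For the sample arrays above, the greedy pass selects indices 0, 1, 3 and 4. A small variant that returns the selection instead of printing it, with the hand-checked result in an assert (this helper is written here for illustration, it is not part of the module above):

def max_activities(start, finish):
    """Return the selected activity indices (same greedy rule as above)."""
    selected = [0]
    last = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[last]:
            selected.append(j)
            last = j
    return selected

assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]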
| 261 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    """Configuration class to store the configuration of a GPTBigCode model."""

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
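A quick offline smoke test of the config class above; note how the aliases declared in attribute_map resolve (no weights or network access needed):

config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=64)
print(config.hidden_size)        # 64 — resolved to n_embd via attribute_map
print(config.num_hidden_layers)  # 2 — resolved to n_layer
print(config.multi_query)        # True by default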
| 694 |
'''simple docstring'''
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the multiset formed by the two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
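A few hand-checked cases make the odd/even branch concrete:

assert median_of_two_arrays([1, 2], [3]) == 2            # odd count: middle element
assert median_of_two_arrays([0, -1.1], [2.5, 1]) == 0.5  # even count: mean of the two middle values
assert median_of_two_arrays([], [5.0]) == 5.0            # one array may be empty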
| 694 | 1 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        # (defined under the nt guard so the module imports cleanly on posix)
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor and always restores it."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
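Typical use of the hide() context manager defined above; the finally clause guarantees the cursor comes back even if the body raises:

import time

with hide():  # cursor is invisible inside the block
    for step in range(3):
        print(f"working... step {step}")
        time.sleep(0.1)
# cursor restored here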
| 104 |
'''simple docstring'''
def count_inversions_bf(arr):
    """Count inversions in O(n^2) time by checking every pair."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions in O(n log n) time via merge sort; returns (sorted_arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting pairs (x in p, y in q) with x > y."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]

    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)

    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
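A small hand-checked case for the merge step above: between the sorted halves [3, 5] and [2, 4], the cross pairs (3, 2), (5, 2) and (5, 4) are inversions, so the count is 3:

merged, cross = _count_cross_inversions([3, 5], [2, 4])
assert merged == [2, 3, 4, 5] and cross == 3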
| 152 | 0 |
import unittest

from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST


class MraModelTester:
    def __init__(self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 664 |
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import VivitImageProcessor


class VivitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 664 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020


@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #               ^ unk: 2 + 1 = 3                  unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)


@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBartTokenizer.from_pretrained(cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
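The fairseq-parity test above relies on shift_tokens_right turning labels into decoder inputs by rotating the final language-code token to the front. A toy re-implementation of that shape of transformation, written here purely for exposition (it is not the transformers function itself, which also handles padding edge cases):

import torch

def toy_shift_tokens_right(labels: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    """Move each row's last non-pad token (the language code) to position 0."""
    prev_output_tokens = labels.clone()
    index_of_eos = (labels.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = labels.gather(1, index_of_eos).squeeze()
    prev_output_tokens[:, 1:] = labels[:, :-1].clone()
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens

labels = torch.tensor([[100, 101, 2, 250020]])  # tokens, eos, ro_RO code
print(toy_shift_tokens_right(labels, pad_token_id=1))
# tensor([[250020,    100,    101,      2]])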
| 366 |
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
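A toy illustration of the greedy packing above, using a fake tokenizer that just counts whitespace-separated words (the WordCounter stub is invented for illustration and only mimics the one attribute pack_examples reads):

from types import SimpleNamespace

import torch

class WordCounter:
    """Stand-in for `tok`: 'token count' is just the word count."""
    def __call__(self, text, return_tensors="pt"):
        return SimpleNamespace(input_ids=torch.zeros(1, len(text.split())))

src = ["a b", "c d", "e f g h", "i"]
tgt = ["1 2", "3 4", "5 6 7 8", "9"]
packed_src, packed_tgt = pack_examples(WordCounter(), src, tgt, max_tokens=4)
print(packed_src)  # ['a b c d', 'e f g h', 'i'] — adjacent short pairs get merged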
| 625 | 0 |
def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
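A quick sanity check against the first few perfect numbers:

assert [n for n in range(1, 500) if perfect(n)] == [6, 28, 496]  # 6 = 1 + 2 + 3, 28 = 1 + 2 + 4 + 7 + 14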
| 713 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__A = "src/transformers"
__A = "docs/source/en/tasks"
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] ) -> List[str]:
"""simple docstring"""
with open(UpperCamelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowerCamelCase = f.readlines()
# Find the start prompt.
__lowerCamelCase = 0
while not lines[start_index].startswith(UpperCamelCase__ ):
start_index += 1
start_index += 1
__lowerCamelCase = start_index
while not lines[end_index].startswith(UpperCamelCase__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    """simple docstring"""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """simple docstring"""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
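
# Typical invocations (illustrative; run from the repository root):
#   python utils/check_task_guides.py                      # only report stale model lists
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite the task guides in place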
| 167 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__(
        self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None,
        activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1,
        layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True,
        bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
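
# Minimal usage sketch. The config mirrors `transformers.GPTBigCodeConfig`;
# the model class below is assumed to come from the same upstream library:
#   from transformers import GPTBigCodeModel
#   config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
#   model = GPTBigCodeModel(config)  # randomly initialized, handy for quick tests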
| 40 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """simple docstring"""

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
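
# Usage sketch (assumes an Accelerate script; `Accelerator()` initializes the
# PartialState that `log` checks for before emitting anything):
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once", main_process_only=True)
#   logger.info("printed by every rank, in order", main_process_only=False, in_order=True)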
| 194 | 0 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
    def test_pegasus_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
def _lowerCAmelCase( self ) -> int:
# fmt: off
lowercase__ : List[Any] = {'''input_ids''': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
@require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        original_tokenizer_text = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )

        token_ids = self._large_tokenizer(original_tokenizer_text).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 428 | '''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 428 | 1 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version, operation, requirement_version):
    '''simple docstring'''
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation, version):
    '''simple docstring'''
    return compare_versions(torch_version, operation, version)
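
# Usage sketch for the helpers above. The operator strings come from
# STR_OPERATION_TO_FUNC in the constants module (assumed to include the usual
# comparison operators such as ">=", "<", "=="):
#   if is_torch_version(">=", "1.12.0"):
#       pass  # safe to use a feature introduced in torch 1.12
#   compare_versions("numpy", ">=", "1.20")  # checks the installed numpy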
| 486 | def kth_permutation(k, n):
    '''simple docstring'''
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
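
# Worked example: the 10th (0-indexed) lexicographic permutation of 0..3.
#   kth_permutation(10, 4) -> [1, 3, 0, 2]
# Counting in lexicographic order, [0, 1, 2, 3] has index 0, [0, 1, 3, 2]
# has index 1, and so on up to [3, 2, 1, 0] at index 23.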
| 486 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """simple docstring"""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """simple docstring"""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """simple docstring"""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """simple docstring"""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """simple docstring"""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
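
# Usage sketch for live, streaming-style consumption (requires a working
# `ffmpeg` binary and a microphone; the chunk sizes below are illustrative):
#   for item in ffmpeg_microphone_live(sampling_rate=16000, chunk_length_s=5.0,
#                                      stream_chunk_s=1.0):
#       audio = item["raw"]          # np.float32 waveform
#       if not item.get("partial"):  # a full chunk_length_s window is ready
#           pass                     # run a model on `audio` here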
| 604 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 604 | 1 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """simple docstring"""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """simple docstring"""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
"""simple docstring"""
from timeit import timeit
print("""Without Numpy""")
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=10_000 , globals=globals() , ))
print("""With Numpy""")
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=10_000 , globals=globals() , ))
benchmark()
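
# Worked example (both implementations agree on the classic 3-4-5 triangle):
#   euclidean_distance([0, 0], [3, 4])       -> 5.0
#   euclidean_distance_no_np([0, 0], [3, 4]) -> 5.0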
| 648 |
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester(object):
    """simple docstring"""

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaV2Config(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 648 | 1 |
'''simple docstring'''
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.001, 1000),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.00454, 264.172),
'''cubicyard''': from_to(0.76455, 1.30795),
'''cubicfoot''': from_to(0.028, 35.3147),
'''cup''': from_to(0.000236588, 4226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
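
# Worked examples from the METRIC_CONVERSION table above:
#   volume_conversion(4, "cubicmeter", "litre") -> 4000.0
#   volume_conversion(1, "litre", "gallon")     -> 0.264172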
| 719 |
'''simple docstring'''
def solution():
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
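
# This is Project Euler problem 40: solution() multiplies the digits of
# Champernowne's constant at positions 1, 10, 100, ..., 1_000_000; the
# well-known answer it prints is 210.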
| 653 | 0 |
"""simple docstring"""
from collections import defaultdict
def dfs(start: int) -> int:
    """simple docstring"""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """simple docstring"""
    dfs(1)
if __name__ == "__main__":
    number_of_nodes, number_of_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
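    # For the sample 10-node tree above this prints 2: cutting the edges
    # (1, 3) and (1, 6) leaves three components of even size.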
| 58 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
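
# With the lazy module registered, users import from the package as usual and
# the heavy submodules only load on first attribute access, e.g. (assuming
# this file is transformers/models/vit_mae/__init__.py):
#   from transformers.models.vit_mae import ViTMAEModel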
| 488 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
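
# Illustrative CLI usage once this parser is wired into the `accelerate`
# entry point:
#   accelerate test                                    # use the default config
#   accelerate test --config_file path/to/config.yaml  # test a specific config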
| 478 |
import numpy as np
SQUARE = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter
    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message
    def decode(self, message: str) -> str:
        message = message.lower()
        message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
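
# Round-trip sketch. Spaces are stripped and "j" is folded into "i" by the
# 5x5 Polybius square, so decode(encode(m)) recovers m up to that mapping:
#   cipher = BifidCipher()
#   assert cipher.decode(cipher.encode("testmessage")) == "testmessage"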
| 478 | 1 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
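
# Usage sketch (the checkpoint name follows the upstream Chinese-CLIP release
# and is an assumption here; `image` is any PIL image):
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只猫"], images=image, return_tensors="pt")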
| 77 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 77 | 1 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, classifier_dropout=0.0,
        scale_embedding=False, use_cache=True, num_labels=3,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id,
            eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
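
# Usage sketch (added): exporting dummy ONNX inputs from the config above.
# The checkpoint name is an illustrative assumption; any BART tokenizer works.
#
#     from transformers import AutoTokenizer
#
#     config = BartConfig()
#     onnx_config = BartOnnxConfig(config, task="default")
#     tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#     print(sorted(dummy.keys()))  # input_ids, attention_mask and the decoder_* counterparts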
| 708 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """Copy/paste/tweak the original BLIP weights into the transformers design."""
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
snake_case_ = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
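
# Example invocation (added; the script filename is assumed, and both flags are
# optional, so the conversion can also run with defaults and skip saving):
#
#     python convert_blip_original_pytorch_to_hf.py \
#         --pytorch_dump_folder_path ./blip-converted \
#         --config_path ./blip_config.json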
| 388 | 0 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
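
# Minimal sketch (added) of the thresholding logic in `forward`, using made-up
# scores in place of real `p_head`/`w_head` outputs so it runs without weights.
if __name__ == "__main__":
    scores = torch.tensor([0.2, 0.7, 0.4])
    flagged = (scores > 0.5).tolist()  # mirrors `nsfw_detected > p_threshold`
    print(flagged)  # [False, True, False] -> only the second image would be blacked out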
| 695 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100, hidden_size=1536, num_hidden_layers=24, num_attention_heads=24,
        intermediate_size=6144, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0,
        initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False,
        max_relative_positions=-1, pad_token_id=0, position_biased_input=True,
        pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
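
# Usage sketch (added): the "c2p|p2c" string form is the backward-compatible
# input that the constructor above normalizes into a list.
if __name__ == "__main__":
    config = DebertaV2Config(relative_attention=True, pos_att_type="c2p|p2c")
    print(config.pos_att_type)  # ['c2p', 'p2c']
    print(config.hidden_size, config.num_hidden_layers)  # 1536 24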
| 695 | 1 |
"""Tests for gradient synchronization and accumulation with Accelerate."""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
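
# Minimal sketch (added) of the training pattern these tests verify: inside
# `accelerator.accumulate(model)`, gradients are only synchronized (and should
# only be stepped) every `gradient_accumulation_steps` batches. The model,
# optimizer and dataloader names below are illustrative placeholders.
#
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch, target in dataloader:
#         with accelerator.accumulate(model):
#             loss = F.mse_loss(model(batch), target)
#             accelerator.backward(loss)
#             optimizer.step()
#             optimizer.zero_grad()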
| 703 |
"""Test that an Accelerate-prepared optimizer can be pickled."""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 656 | 0 |
"""PyTorch RegNet model."""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, groups=1, activation="relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    # Projects residual features to the correct size and, if needed, downsamples the input with `stride=2`.
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    # Squeeze-and-Excitation layer, https://arxiv.org/abs/1709.01507
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    # RegNet layer composed of three convolutions, same as a ResNet bottleneck layer with reduction = 1.
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    # RegNet's Y layer: an X layer with Squeeze-and-Excitation.
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    # A RegNet stage composed of stacked layers.
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
_lowerCAmelCase = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowerCAmelCase = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
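
# Usage sketch (added): single-image classification with the checkpoint named
# in the docstring constants above. `AutoImageProcessor` and the local image
# path are illustrative assumptions.
#
#     from PIL import Image
#     from transformers import AutoImageProcessor
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(Image.open("cat.jpg"), return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"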
| 161 |
"""Speed of sound in a fluid, computed from its density and bulk modulus."""


def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Newton-Laplace formula: c = sqrt(K / rho), where K is the bulk modulus (Pa)
    and rho is the density (kg/m^3).
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
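
# Worked example (added): for water, density is roughly 998 kg/m^3 and the bulk
# modulus roughly 2.15e9 Pa, giving about 1468 m/s (measured value: ~1480 m/s).
if __name__ == "__main__":
    print(f"{speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9):.0f} m/s")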
| 161 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling from Karras et al. tailored to variance-expanding (VE) models."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # Ensures interchangeability with schedulers that need to scale the denoising model input.
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        # Explicit Langevin-like "churn" step: add noise according to a factor gamma_i >= 0
        # to reach a higher noise level sigma_hat = sigma_i + gamma_i * sigma_i.
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        # Correct the predicted sample based on the second model evaluation.
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
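
# Sampling-loop sketch (added), mirroring how a pipeline would drive this
# scheduler. `unet` stands in for a real model that maps a rescaled sample and
# sigma to a noise prediction; shapes and step count are illustrative.
#
#     scheduler = KarrasVeScheduler()
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         sigma = scheduler.schedule[t]
#         sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#         sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#         model_output = (sigma_hat / 2) * unet((sample_hat + 1) / 2, sigma_hat / 2).sample
#         step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#         # a second-order correction via `step_correct` is applied when sigma_prev != 0
#         sample = step_output.prev_sample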
| 712 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Return the prime numbers up to `num` using the sieve of Eratosthenes.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
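
# Quick demonstration (added): primes up to 30. The sieve runs in
# O(n log log n) time with O(n) memory.
if __name__ == "__main__":
    print(prime_sieve_eratosthenes(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]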
| 336 | 0 |
"""simple docstring"""
def lowercase ( a__ : int ) -> bool:
_UpperCamelCase = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowercase ( a__ : int = 5000 ) -> int:
_UpperCamelCase = [(i * (3 * i - 1)) // 2 for i in range(1 , _A )]
for i, pentagonal_i in enumerate(_A ):
for j in range(_A , len(_A ) ):
_UpperCamelCase = pentagonal_nums[j]
_UpperCamelCase = pentagonal_i + pentagonal_j
_UpperCamelCase = pentagonal_j - pentagonal_i
if is_pentagonal(_A ) and is_pentagonal(_A ):
return b
return -1
if __name__ == "__main__":
print(F'''{solution() = }''')
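
# Quick check (added): the first pentagonal numbers P_n = n(3n - 1)/2 are
# 1, 5, 12, 22, 35, and `is_pentagonal` recognizes exactly those.
if __name__ == "__main__":
    print([n for n in range(1, 36) if is_pentagonal(n)])  # [1, 5, 12, 22, 35]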
| 420 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always
        # return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4,
            num_channels=3, num_hidden_layers=5, patch_size=14,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3, size=224,
        )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000,
            clip_sample=True, clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
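# Minimal sketch (not in the original file): lets the fast tests above be run
# directly, e.g. `python test_kandinsky_v22_prior.py` (filename assumed).
if __name__ == "__main__":
    unittest.main()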
| 491 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_save_config(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
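# Illustrative sketch (not part of the original test file): how the backbones
# exercised above are used directly; the checkpoint name mirrors the test's own
# "resnet18" (timm) / "microsoft/resnet-18" (transformers) pair.
def _example_backbone_usage():
    backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
    pixel_values = torch.randn(1, 3, 224, 224)
    feature_maps = backbone(pixel_values).feature_maps  # one tensor per requested stage
    return [fm.shape for fm in feature_maps]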
| 712 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_poolformer': [
        'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'PoolFormerConfig',
        'PoolFormerOnnxConfig',
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_poolformer'] = [
        'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PoolFormerForImageClassification',
        'PoolFormerModel',
        'PoolFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
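# Illustrative sketch (not part of this module): with the _LazyModule above,
# importing the package stays cheap, and modeling_poolformer is only imported
# the first time one of its registered names is accessed, e.g.:
#
#     from transformers import PoolFormerForImageClassification  # triggers the torch-side import
#     model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")  # checkpoint name for illustration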
| 217 | 0 |